-rw-r--r--.gitignore2
-rw-r--r--Documentation/DocBook/Makefile2
-rw-r--r--Documentation/DocBook/sis900.tmpl585
-rw-r--r--Documentation/dvb/avermedia.txt10
-rw-r--r--Documentation/dvb/bt8xx.txt140
-rw-r--r--Documentation/dvb/get_dvb_firmware23
-rw-r--r--Documentation/dvb/readme.txt32
-rw-r--r--Documentation/feature-removal-schedule.txt7
-rw-r--r--Documentation/networking/00-INDEX2
-rw-r--r--Documentation/networking/README.ipw210012
-rw-r--r--Documentation/networking/README.ipw220044
-rw-r--r--Documentation/networking/sis900.txt257
-rw-r--r--Documentation/video4linux/CARDLIST.cx882
-rw-r--r--Documentation/video4linux/CARDLIST.em28xx1
-rw-r--r--Documentation/video4linux/CARDLIST.saa71349
-rw-r--r--Documentation/video4linux/CARDLIST.tuner6
-rw-r--r--Documentation/video4linux/README.cpia2130
-rw-r--r--Documentation/video4linux/cpia2_overview.txt38
-rw-r--r--arch/arm/mach-realview/core.c28
-rw-r--r--arch/ppc/platforms/hdpu.c5
-rw-r--r--arch/sparc/kernel/ioport.c40
-rw-r--r--arch/sparc64/Kconfig18
-rw-r--r--arch/sparc64/defconfig25
-rw-r--r--arch/sparc64/kernel/Makefile8
-rw-r--r--arch/sparc64/kernel/binfmt_aout32.c14
-rw-r--r--arch/sparc64/kernel/binfmt_elf32.c4
-rw-r--r--arch/sparc64/kernel/cpu.c7
-rw-r--r--arch/sparc64/kernel/devices.c189
-rw-r--r--arch/sparc64/kernel/dtlb_backend.S170
-rw-r--r--arch/sparc64/kernel/dtlb_base.S109
-rw-r--r--arch/sparc64/kernel/dtlb_miss.S39
-rw-r--r--arch/sparc64/kernel/ebus.c3
-rw-r--r--arch/sparc64/kernel/entry.S331
-rw-r--r--arch/sparc64/kernel/etrap.S170
-rw-r--r--arch/sparc64/kernel/head.S254
-rw-r--r--arch/sparc64/kernel/irq.c339
-rw-r--r--arch/sparc64/kernel/itlb_base.S79
-rw-r--r--arch/sparc64/kernel/itlb_miss.S39
-rw-r--r--arch/sparc64/kernel/ktlb.S363
-rw-r--r--arch/sparc64/kernel/pci.c13
-rw-r--r--arch/sparc64/kernel/pci_common.c301
-rw-r--r--arch/sparc64/kernel/pci_iommu.c36
-rw-r--r--arch/sparc64/kernel/pci_psycho.c23
-rw-r--r--arch/sparc64/kernel/pci_sabre.c23
-rw-r--r--arch/sparc64/kernel/pci_schizo.c24
-rw-r--r--arch/sparc64/kernel/pci_sun4v.c1147
-rw-r--r--arch/sparc64/kernel/pci_sun4v.h31
-rw-r--r--arch/sparc64/kernel/pci_sun4v_asm.S95
-rw-r--r--arch/sparc64/kernel/process.c133
-rw-r--r--arch/sparc64/kernel/ptrace.c3
-rw-r--r--arch/sparc64/kernel/rtrap.S115
-rw-r--r--arch/sparc64/kernel/sbus.c10
-rw-r--r--arch/sparc64/kernel/setup.c409
-rw-r--r--arch/sparc64/kernel/smp.c418
-rw-r--r--arch/sparc64/kernel/sparc64_ksyms.c26
-rw-r--r--arch/sparc64/kernel/sun4v_ivec.S334
-rw-r--r--arch/sparc64/kernel/sun4v_tlb_miss.S421
-rw-r--r--arch/sparc64/kernel/sys_sparc.c297
-rw-r--r--arch/sparc64/kernel/sys_sparc32.c9
-rw-r--r--arch/sparc64/kernel/time.c373
-rw-r--r--arch/sparc64/kernel/trampoline.S238
-rw-r--r--arch/sparc64/kernel/traps.c403
-rw-r--r--arch/sparc64/kernel/tsb.S442
-rw-r--r--arch/sparc64/kernel/ttable.S63
-rw-r--r--arch/sparc64/kernel/unaligned.c45
-rw-r--r--arch/sparc64/kernel/us2e_cpufreq.c11
-rw-r--r--arch/sparc64/kernel/us3_cpufreq.c11
-rw-r--r--arch/sparc64/kernel/visemul.c894
-rw-r--r--arch/sparc64/kernel/vmlinux.lds.S16
-rw-r--r--arch/sparc64/kernel/winfixup.S480
-rw-r--r--arch/sparc64/lib/Makefile2
-rw-r--r--arch/sparc64/lib/NGbzero.S163
-rw-r--r--arch/sparc64/lib/NGcopy_from_user.S37
-rw-r--r--arch/sparc64/lib/NGcopy_to_user.S40
-rw-r--r--arch/sparc64/lib/NGmemcpy.S368
-rw-r--r--arch/sparc64/lib/NGpage.S96
-rw-r--r--arch/sparc64/lib/NGpatch.S33
-rw-r--r--arch/sparc64/lib/U3patch.S3
-rw-r--r--arch/sparc64/lib/bzero.S18
-rw-r--r--arch/sparc64/lib/clear_page.S12
-rw-r--r--arch/sparc64/lib/copy_page.S7
-rw-r--r--arch/sparc64/lib/delay.c19
-rw-r--r--arch/sparc64/lib/xor.S300
-rw-r--r--arch/sparc64/math-emu/math.c24
-rw-r--r--arch/sparc64/mm/Makefile2
-rw-r--r--arch/sparc64/mm/fault.c15
-rw-r--r--arch/sparc64/mm/generic.c40
-rw-r--r--arch/sparc64/mm/hugetlbpage.c179
-rw-r--r--arch/sparc64/mm/init.c1431
-rw-r--r--arch/sparc64/mm/tlb.c64
-rw-r--r--arch/sparc64/mm/tsb.c440
-rw-r--r--arch/sparc64/mm/ultra.S374
-rw-r--r--arch/sparc64/prom/cif.S211
-rw-r--r--arch/sparc64/prom/console.c6
-rw-r--r--arch/sparc64/prom/init.c60
-rw-r--r--arch/sparc64/prom/misc.c44
-rw-r--r--arch/sparc64/prom/p1275.c11
-rw-r--r--arch/sparc64/prom/tree.c9
-rw-r--r--arch/sparc64/solaris/misc.c4
-rw-r--r--block/as-iosched.c144
-rw-r--r--block/cfq-iosched.c354
-rw-r--r--block/deadline-iosched.c116
-rw-r--r--block/elevator.c171
-rw-r--r--block/ll_rw_blk.c105
-rw-r--r--drivers/block/loop.c4
-rw-r--r--drivers/block/pktcdvd.c4
-rw-r--r--drivers/block/umem.c2
-rw-r--r--drivers/md/dm.c4
-rw-r--r--drivers/md/md.c5
-rw-r--r--drivers/media/common/Makefile1
-rw-r--r--drivers/media/common/ir-common.c519
-rw-r--r--drivers/media/common/ir-functions.c272
-rw-r--r--drivers/media/common/ir-keymaps.c1415
-rw-r--r--drivers/media/common/saa7146_core.c9
-rw-r--r--drivers/media/common/saa7146_fops.c24
-rw-r--r--drivers/media/common/saa7146_i2c.c4
-rw-r--r--drivers/media/common/saa7146_vbi.c2
-rw-r--r--drivers/media/common/saa7146_video.c30
-rw-r--r--drivers/media/dvb/b2c2/flexcop-common.h4
-rw-r--r--drivers/media/dvb/b2c2/flexcop-i2c.c6
-rw-r--r--drivers/media/dvb/bt8xx/Makefile2
-rw-r--r--drivers/media/dvb/bt8xx/bt878.c4
-rw-r--r--drivers/media/dvb/bt8xx/bt878.h4
-rw-r--r--drivers/media/dvb/bt8xx/dst.c14
-rw-r--r--drivers/media/dvb/bt8xx/dst_ca.c6
-rw-r--r--drivers/media/dvb/bt8xx/dst_common.h3
-rw-r--r--drivers/media/dvb/bt8xx/dvb-bt8xx.c45
-rw-r--r--drivers/media/dvb/bt8xx/dvb-bt8xx.h3
-rw-r--r--drivers/media/dvb/cinergyT2/cinergyT2.c51
-rw-r--r--drivers/media/dvb/dvb-core/dmxdev.c795
-rw-r--r--drivers/media/dvb/dvb-core/dmxdev.h36
-rw-r--r--drivers/media/dvb/dvb-core/dvb_demux.c104
-rw-r--r--drivers/media/dvb/dvb-core/dvb_demux.h4
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.c21
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.h1
-rw-r--r--drivers/media/dvb/dvb-core/dvb_net.c14
-rw-r--r--drivers/media/dvb/dvb-core/dvb_ringbuffer.c2
-rw-r--r--drivers/media/dvb/dvb-core/dvb_ringbuffer.h1
-rw-r--r--drivers/media/dvb/dvb-usb/cxusb.c4
-rw-r--r--drivers/media/dvb/dvb-usb/dibusb-common.c4
-rw-r--r--drivers/media/dvb/dvb-usb/digitv.c4
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-init.c4
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-urb.c4
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb.h9
-rw-r--r--drivers/media/dvb/dvb-usb/vp702x.c4
-rw-r--r--drivers/media/dvb/dvb-usb/vp7045.c4
-rw-r--r--drivers/media/dvb/frontends/Kconfig12
-rw-r--r--drivers/media/dvb/frontends/Makefile1
-rw-r--r--drivers/media/dvb/frontends/bcm3510.c9
-rw-r--r--drivers/media/dvb/frontends/bsbe1.h123
-rw-r--r--drivers/media/dvb/frontends/bsru6.h140
-rw-r--r--drivers/media/dvb/frontends/cx24110.c13
-rw-r--r--drivers/media/dvb/frontends/cx24110.h1
-rw-r--r--drivers/media/dvb/frontends/dvb-pll.c61
-rw-r--r--drivers/media/dvb/frontends/dvb-pll.h7
-rw-r--r--drivers/media/dvb/frontends/lnbp21.h139
-rw-r--r--drivers/media/dvb/frontends/tda1004x.c25
-rw-r--r--drivers/media/dvb/frontends/tda1004x.h3
-rw-r--r--drivers/media/dvb/frontends/zl10353.c311
-rw-r--r--drivers/media/dvb/frontends/zl10353.h43
-rw-r--r--drivers/media/dvb/frontends/zl10353_priv.h42
-rw-r--r--drivers/media/dvb/ttpci/av7110.c260
-rw-r--r--drivers/media/dvb/ttpci/av7110.h7
-rw-r--r--drivers/media/dvb/ttpci/av7110_hw.c40
-rw-r--r--drivers/media/dvb/ttpci/av7110_v4l.c11
-rw-r--r--drivers/media/dvb/ttpci/budget-av.c4
-rw-r--r--drivers/media/dvb/ttpci/budget-ci.c136
-rw-r--r--drivers/media/dvb/ttpci/budget-patch.c99
-rw-r--r--drivers/media/dvb/ttpci/budget.c250
-rw-r--r--drivers/media/dvb/ttpci/budget.h4
-rw-r--r--drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c35
-rw-r--r--drivers/media/dvb/ttusb-dec/ttusb_dec.c31
-rw-r--r--drivers/media/radio/miropcm20-rds-core.c11
-rw-r--r--drivers/media/radio/radio-aimslab.c20
-rw-r--r--drivers/media/radio/radio-aztech.c12
-rw-r--r--drivers/media/radio/radio-maestro.c11
-rw-r--r--drivers/media/radio/radio-maxiradio.c11
-rw-r--r--drivers/media/radio/radio-sf16fmi.c22
-rw-r--r--drivers/media/radio/radio-sf16fmr2.c22
-rw-r--r--drivers/media/radio/radio-typhoon.c12
-rw-r--r--drivers/media/radio/radio-zoltrix.c26
-rw-r--r--drivers/media/video/Kconfig54
-rw-r--r--drivers/media/video/Makefile15
-rw-r--r--drivers/media/video/arv.c16
-rw-r--r--drivers/media/video/bttv-cards.c16
-rw-r--r--drivers/media/video/bttv-driver.c48
-rw-r--r--drivers/media/video/bttv-input.c248
-rw-r--r--drivers/media/video/bttv-risc.c17
-rw-r--r--drivers/media/video/bw-qcam.c16
-rw-r--r--drivers/media/video/bw-qcam.h2
-rw-r--r--drivers/media/video/c-qcam.c19
-rw-r--r--drivers/media/video/cpia.c102
-rw-r--r--drivers/media/video/cpia.h5
-rw-r--r--drivers/media/video/cpia2/Kconfig9
-rw-r--r--drivers/media/video/cpia2/Makefile3
-rw-r--r--drivers/media/video/cpia2/cpia2.h497
-rw-r--r--drivers/media/video/cpia2/cpia2_core.c2525
-rw-r--r--drivers/media/video/cpia2/cpia2_registers.h476
-rw-r--r--drivers/media/video/cpia2/cpia2_usb.c907
-rw-r--r--drivers/media/video/cpia2/cpia2_v4l.c2079
-rw-r--r--drivers/media/video/cpia2/cpia2dev.h50
-rw-r--r--drivers/media/video/cpia2/cpia2patch.h233
-rw-r--r--drivers/media/video/cx25840/Kconfig9
-rw-r--r--drivers/media/video/cx25840/Makefile2
-rw-r--r--drivers/media/video/cx25840/cx25840-core.c46
-rw-r--r--drivers/media/video/cx25840/cx25840-vbi.c6
-rw-r--r--drivers/media/video/cx25840/cx25840.h1
-rw-r--r--drivers/media/video/cx88/Kconfig11
-rw-r--r--drivers/media/video/cx88/Makefile1
-rw-r--r--drivers/media/video/cx88/cx88-alsa.c10
-rw-r--r--drivers/media/video/cx88/cx88-cards.c111
-rw-r--r--drivers/media/video/cx88/cx88-core.c9
-rw-r--r--drivers/media/video/cx88/cx88-dvb.c108
-rw-r--r--drivers/media/video/cx88/cx88-input.c339
-rw-r--r--drivers/media/video/cx88/cx88-video.c57
-rw-r--r--drivers/media/video/cx88/cx88.h8
-rw-r--r--drivers/media/video/dpc7146.c58
-rw-r--r--drivers/media/video/em28xx/Kconfig1
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c72
-rw-r--r--drivers/media/video/em28xx/em28xx-i2c.c1
-rw-r--r--drivers/media/video/em28xx/em28xx-input.c85
-rw-r--r--drivers/media/video/em28xx/em28xx-video.c1613
-rw-r--r--drivers/media/video/em28xx/em28xx.h9
-rw-r--r--drivers/media/video/hexium_gemini.c10
-rw-r--r--drivers/media/video/hexium_orion.c18
-rw-r--r--drivers/media/video/ir-kbd-i2c.c50
-rw-r--r--drivers/media/video/meye.c112
-rw-r--r--drivers/media/video/meye.h4
-rw-r--r--drivers/media/video/msp3400-driver.c76
-rw-r--r--drivers/media/video/msp3400-kthreads.c333
-rw-r--r--drivers/media/video/msp3400.h10
-rw-r--r--drivers/media/video/mxb.c150
-rw-r--r--drivers/media/video/mxb.h2
-rw-r--r--drivers/media/video/planb.c8
-rw-r--r--drivers/media/video/planb.h2
-rw-r--r--drivers/media/video/pms.c28
-rw-r--r--drivers/media/video/saa5246a.c10
-rw-r--r--drivers/media/video/saa5249.c10
-rw-r--r--drivers/media/video/saa7115.c107
-rw-r--r--drivers/media/video/saa7134/saa7134-alsa.c9
-rw-r--r--drivers/media/video/saa7134/saa7134-cards.c294
-rw-r--r--drivers/media/video/saa7134/saa7134-core.c31
-rw-r--r--drivers/media/video/saa7134/saa7134-dvb.c182
-rw-r--r--drivers/media/video/saa7134/saa7134-empress.c8
-rw-r--r--drivers/media/video/saa7134/saa7134-input.c507
-rw-r--r--drivers/media/video/saa7134/saa7134-oss.c46
-rw-r--r--drivers/media/video/saa7134/saa7134-tvaudio.c14
-rw-r--r--drivers/media/video/saa7134/saa7134-video.c70
-rw-r--r--drivers/media/video/saa7134/saa7134.h16
-rw-r--r--drivers/media/video/tda8290.c8
-rw-r--r--drivers/media/video/tda9840.c3
-rw-r--r--drivers/media/video/tea6415c.c5
-rw-r--r--drivers/media/video/tea6420.c5
-rw-r--r--drivers/media/video/tuner-core.c69
-rw-r--r--drivers/media/video/tuner-simple.c166
-rw-r--r--drivers/media/video/tuner-types.c599
-rw-r--r--drivers/media/video/tvaudio.c26
-rw-r--r--drivers/media/video/tvp5150.c681
-rw-r--r--drivers/media/video/tvp5150_reg.h125
-rw-r--r--drivers/media/video/v4l2-common.c558
-rw-r--r--drivers/media/video/video-buf-dvb.c10
-rw-r--r--drivers/media/video/video-buf.c59
-rw-r--r--drivers/media/video/videodev.c22
-rw-r--r--drivers/media/video/vino.c33
-rw-r--r--drivers/net/3c509.c70
-rw-r--r--drivers/net/3c523.c9
-rw-r--r--drivers/net/3c59x.c7
-rw-r--r--drivers/net/7990.c2
-rw-r--r--drivers/net/8139cp.c2
-rw-r--r--drivers/net/8139too.c4
-rw-r--r--drivers/net/82596.c2
-rw-r--r--drivers/net/Kconfig29
-rw-r--r--drivers/net/apne.c7
-rw-r--r--drivers/net/arcnet/Kconfig4
-rw-r--r--drivers/net/arcnet/arc-rawmode.c2
-rw-r--r--drivers/net/arcnet/arc-rimi.c68
-rw-r--r--drivers/net/arcnet/arcnet.c20
-rw-r--r--drivers/net/arcnet/com90xx.c132
-rw-r--r--drivers/net/arcnet/rfc1051.c2
-rw-r--r--drivers/net/arcnet/rfc1201.c2
-rw-r--r--drivers/net/arm/etherh.c3
-rw-r--r--drivers/net/bnx2.c10
-rw-r--r--drivers/net/bnx2_fw.h84
-rw-r--r--drivers/net/bonding/bond_alb.c2
-rw-r--r--drivers/net/bonding/bond_main.c45
-rw-r--r--drivers/net/bonding/bond_sysfs.c6
-rw-r--r--drivers/net/bonding/bonding.h33
-rw-r--r--drivers/net/chelsio/espi.c14
-rw-r--r--drivers/net/chelsio/subr.c2
-rw-r--r--drivers/net/dgrs.c2
-rw-r--r--drivers/net/dgrs_firmware.c4
-rw-r--r--drivers/net/dl2k.c4
-rw-r--r--drivers/net/e100.c6
-rw-r--r--drivers/net/e1000/e1000.h68
-rw-r--r--drivers/net/e1000/e1000_ethtool.c110
-rw-r--r--drivers/net/e1000/e1000_hw.c734
-rw-r--r--drivers/net/e1000/e1000_hw.h319
-rw-r--r--drivers/net/e1000/e1000_main.c609
-rw-r--r--drivers/net/e1000/e1000_param.c2
-rw-r--r--drivers/net/eepro100.c4
-rw-r--r--drivers/net/epic100.c4
-rw-r--r--drivers/net/eth16i.c11
-rw-r--r--drivers/net/fealnx.c2
-rw-r--r--drivers/net/forcedeth.c593
-rw-r--r--drivers/net/hamachi.c2
-rw-r--r--drivers/net/hamradio/baycom_epp.c2
-rw-r--r--drivers/net/hp100.c35
-rw-r--r--drivers/net/ibm_emac/ibm_emac_core.c40
-rw-r--r--drivers/net/ibm_emac/ibm_emac_core.h2
-rw-r--r--drivers/net/ibm_emac/ibm_emac_debug.c2
-rw-r--r--drivers/net/ibm_emac/ibm_emac_rgmii.h2
-rw-r--r--drivers/net/ibm_emac/ibm_emac_zmii.c7
-rw-r--r--drivers/net/ibm_emac/ibm_emac_zmii.h2
-rw-r--r--drivers/net/irda/Kconfig4
-rw-r--r--drivers/net/macsonic.c2
-rw-r--r--drivers/net/mv643xx_eth.c1558
-rw-r--r--drivers/net/mv643xx_eth.h250
-rw-r--r--drivers/net/natsemi.c192
-rw-r--r--drivers/net/ne-h8300.c5
-rw-r--r--drivers/net/ne.c7
-rw-r--r--drivers/net/ne2.c7
-rw-r--r--drivers/net/ne2k-pci.c2
-rw-r--r--drivers/net/ns83820.c7
-rw-r--r--drivers/net/oaknet.c3
-rw-r--r--drivers/net/pcmcia/3c574_cs.c2
-rw-r--r--drivers/net/pcmcia/3c589_cs.c5
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c2
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c2
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c3
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c4
-rw-r--r--drivers/net/pcmcia/xirc2ps_cs.c2
-rw-r--r--drivers/net/pcnet32.c6
-rw-r--r--drivers/net/phy/phy.c2
-rw-r--r--drivers/net/plip.c4
-rw-r--r--drivers/net/ppp_async.c3
-rw-r--r--drivers/net/ppp_synctty.c2
-rw-r--r--drivers/net/r8169.c4
-rw-r--r--drivers/net/s2io.c619
-rw-r--r--drivers/net/s2io.h55
-rw-r--r--drivers/net/sb1000.c2
-rw-r--r--drivers/net/sb1250-mac.c109
-rw-r--r--drivers/net/seeq8005.c5
-rw-r--r--drivers/net/sgiseeq.c17
-rw-r--r--drivers/net/shaper.c3
-rw-r--r--drivers/net/sis190.c2
-rw-r--r--drivers/net/sis900.c8
-rw-r--r--drivers/net/sk98lin/h/skaddr.h48
-rw-r--r--drivers/net/sk98lin/h/skcsum.h6
-rw-r--r--drivers/net/sk98lin/h/skgeinit.h56
-rw-r--r--drivers/net/sk98lin/h/skgepnmi.h4
-rw-r--r--drivers/net/sk98lin/h/skgesirq.h1
-rw-r--r--drivers/net/sk98lin/h/ski2c.h3
-rw-r--r--drivers/net/sk98lin/h/skvpd.h15
-rw-r--r--drivers/net/sk98lin/skaddr.c35
-rw-r--r--drivers/net/sk98lin/skgeinit.c148
-rw-r--r--drivers/net/sk98lin/skgemib.c7
-rw-r--r--drivers/net/sk98lin/skgepnmi.c153
-rw-r--r--drivers/net/sk98lin/skgesirq.c24
-rw-r--r--drivers/net/sk98lin/ski2c.c6
-rw-r--r--drivers/net/sk98lin/sklm80.c72
-rw-r--r--drivers/net/sk98lin/skrlmt.c1
-rw-r--r--drivers/net/sk98lin/skvpd.c108
-rw-r--r--drivers/net/sk98lin/skxmac2.c461
-rw-r--r--drivers/net/skfp/fplustm.c14
-rw-r--r--drivers/net/skfp/pcmplc.c4
-rw-r--r--drivers/net/skfp/skfddi.c2
-rw-r--r--drivers/net/starfire.c40
-rw-r--r--drivers/net/sundance.c10
-rw-r--r--drivers/net/sungem_phy.c2
-rw-r--r--drivers/net/tg3.c8
-rw-r--r--drivers/net/tokenring/lanstreamer.c3
-rw-r--r--drivers/net/tokenring/olympic.c9
-rw-r--r--drivers/net/tulip/de2104x.c18
-rw-r--r--drivers/net/tulip/pnic.c3
-rw-r--r--drivers/net/tulip/winbond-840.c2
-rw-r--r--drivers/net/tulip/xircom_cb.c9
-rw-r--r--drivers/net/typhoon.c2
-rw-r--r--drivers/net/wan/Kconfig2
-rw-r--r--drivers/net/wan/hostess_sv11.c1
-rw-r--r--drivers/net/wan/sealevel.c1
-rw-r--r--drivers/net/wireless/Kconfig32
-rw-r--r--drivers/net/wireless/airo.c338
-rw-r--r--drivers/net/wireless/atmel.c110
-rw-r--r--drivers/net/wireless/ipw2100.c266
-rw-r--r--drivers/net/wireless/ipw2100.h17
-rw-r--r--drivers/net/wireless/ipw2200.c1239
-rw-r--r--drivers/net/wireless/ipw2200.h103
-rw-r--r--drivers/net/wireless/netwave_cs.c2
-rw-r--r--drivers/net/wireless/strip.c4
-rw-r--r--drivers/net/wireless/wavelan.p.h6
-rw-r--r--drivers/net/wireless/wavelan_cs.p.h9
-rw-r--r--drivers/net/yellowfin.c6
-rw-r--r--drivers/net/zorro8390.c7
-rw-r--r--drivers/s390/block/dcssblk.c8
-rw-r--r--drivers/sbus/char/bbc_i2c.c4
-rw-r--r--drivers/scsi/Makefile2
-rw-r--r--drivers/scsi/ahci.c197
-rw-r--r--drivers/scsi/ata_piix.c392
-rw-r--r--drivers/scsi/libata-bmdma.c703
-rw-r--r--drivers/scsi/libata-core.c2794
-rw-r--r--drivers/scsi/libata-scsi.c240
-rw-r--r--drivers/scsi/libata.h3
-rw-r--r--drivers/scsi/pdc_adma.c6
-rw-r--r--drivers/scsi/sata_mv.c281
-rw-r--r--drivers/scsi/sata_nv.c2
-rw-r--r--drivers/scsi/sata_promise.c129
-rw-r--r--drivers/scsi/sata_qstor.c10
-rw-r--r--drivers/scsi/sata_sil.c126
-rw-r--r--drivers/scsi/sata_sil24.c102
-rw-r--r--drivers/scsi/sata_sis.c2
-rw-r--r--drivers/scsi/sata_svw.c2
-rw-r--r--drivers/scsi/sata_sx4.c25
-rw-r--r--drivers/scsi/sata_uli.c2
-rw-r--r--drivers/scsi/sata_via.c2
-rw-r--r--drivers/scsi/sata_vsc.c2
-rw-r--r--drivers/scsi/scsi_error.c7
-rw-r--r--drivers/serial/Kconfig7
-rw-r--r--drivers/serial/Makefile1
-rw-r--r--drivers/serial/sunhv.c550
-rw-r--r--drivers/serial/sunsab.c19
-rw-r--r--drivers/serial/sunsu.c26
-rw-r--r--drivers/serial/sunzilog.c35
-rw-r--r--fs/jfs/Makefile3
-rw-r--r--fs/jfs/acl.c7
-rw-r--r--fs/jfs/file.c1
-rw-r--r--fs/jfs/inode.c15
-rw-r--r--fs/jfs/ioctl.c107
-rw-r--r--fs/jfs/jfs_dinode.h31
-rw-r--r--fs/jfs/jfs_dmap.c8
-rw-r--r--fs/jfs/jfs_dmap.h2
-rw-r--r--fs/jfs/jfs_dtree.c13
-rw-r--r--fs/jfs/jfs_extent.c20
-rw-r--r--fs/jfs/jfs_imap.c72
-rw-r--r--fs/jfs/jfs_imap.h4
-rw-r--r--fs/jfs/jfs_incore.h10
-rw-r--r--fs/jfs/jfs_inode.c46
-rw-r--r--fs/jfs/jfs_inode.h3
-rw-r--r--fs/jfs/jfs_lock.h1
-rw-r--r--fs/jfs/jfs_logmgr.c35
-rw-r--r--fs/jfs/jfs_logmgr.h2
-rw-r--r--fs/jfs/jfs_metapage.c3
-rw-r--r--fs/jfs/jfs_superblock.h9
-rw-r--r--fs/jfs/jfs_txnmgr.c36
-rw-r--r--fs/jfs/namei.c99
-rw-r--r--fs/jfs/super.c98
-rw-r--r--fs/jfs/xattr.c8
-rw-r--r--include/asm-arm/irq.h2
-rw-r--r--include/asm-sparc/idprom.h26
-rw-r--r--include/asm-sparc/oplib.h2
-rw-r--r--include/asm-sparc/uaccess.h47
-rw-r--r--include/asm-sparc64/a.out.h6
-rw-r--r--include/asm-sparc64/asi.h18
-rw-r--r--include/asm-sparc64/cpudata.h203
-rw-r--r--include/asm-sparc64/elf.h22
-rw-r--r--include/asm-sparc64/head.h15
-rw-r--r--include/asm-sparc64/hypervisor.h2128
-rw-r--r--include/asm-sparc64/idprom.h12
-rw-r--r--include/asm-sparc64/intr_queue.h15
-rw-r--r--include/asm-sparc64/irq.h4
-rw-r--r--include/asm-sparc64/mmu.h36
-rw-r--r--include/asm-sparc64/mmu_context.h162
-rw-r--r--include/asm-sparc64/numnodes.h6
-rw-r--r--include/asm-sparc64/oplib.h43
-rw-r--r--include/asm-sparc64/page.h13
-rw-r--r--include/asm-sparc64/pbm.h3
-rw-r--r--include/asm-sparc64/pci.h56
-rw-r--r--include/asm-sparc64/pgalloc.h166
-rw-r--r--include/asm-sparc64/pgtable.h704
-rw-r--r--include/asm-sparc64/pil.h4
-rw-r--r--include/asm-sparc64/processor.h23
-rw-r--r--include/asm-sparc64/pstate.h9
-rw-r--r--include/asm-sparc64/scratchpad.h14
-rw-r--r--include/asm-sparc64/smp.h30
-rw-r--r--include/asm-sparc64/sparsemem.h12
-rw-r--r--include/asm-sparc64/spitfire.h1
-rw-r--r--include/asm-sparc64/system.h7
-rw-r--r--include/asm-sparc64/thread_info.h9
-rw-r--r--include/asm-sparc64/timex.h6
-rw-r--r--include/asm-sparc64/tlbflush.h25
-rw-r--r--include/asm-sparc64/tsb.h281
-rw-r--r--include/asm-sparc64/ttable.h272
-rw-r--r--include/asm-sparc64/uaccess.h46
-rw-r--r--include/asm-sparc64/vdev.h16
-rw-r--r--include/asm-sparc64/xor.h34
-rw-r--r--include/linux/amba/clcd.h12
-rw-r--r--include/linux/arcdevice.h9
-rw-r--r--include/linux/ata.h22
-rw-r--r--include/linux/blkdev.h10
-rw-r--r--include/linux/dvb/audio.h13
-rw-r--r--include/linux/dvb/video.h13
-rw-r--r--include/linux/elevator.h10
-rw-r--r--include/linux/if.h3
-rw-r--r--include/linux/if_ether.h1
-rw-r--r--include/linux/libata.h180
-rw-r--r--include/linux/mv643xx.h27
-rw-r--r--include/linux/serial_core.h3
-rw-r--r--include/linux/videodev2.h84
-rw-r--r--include/media/ir-common.h40
-rw-r--r--include/media/saa7146.h21
-rw-r--r--include/media/tuner-types.h3
-rw-r--r--include/media/tuner.h6
-rw-r--r--include/media/v4l2-common.h62
-rw-r--r--include/media/video-buf-dvb.h2
-rw-r--r--include/media/video-buf.h2
-rw-r--r--include/net/ieee80211.h177
-rw-r--r--include/net/ieee80211_crypt.h3
-rw-r--r--include/scsi/scsi_eh.h3
-rw-r--r--kernel/exit.c7
-rw-r--r--net/Kconfig3
-rw-r--r--net/core/Makefile2
-rw-r--r--net/core/dev.c36
-rw-r--r--net/ieee80211/ieee80211_crypt.c11
-rw-r--r--net/ieee80211/ieee80211_crypt_ccmp.c8
-rw-r--r--net/ieee80211/ieee80211_crypt_tkip.c56
-rw-r--r--net/ieee80211/ieee80211_crypt_wep.c5
-rw-r--r--net/ieee80211/ieee80211_geo.c48
-rw-r--r--net/ieee80211/ieee80211_module.c20
-rw-r--r--net/ieee80211/ieee80211_rx.c167
-rw-r--r--net/ieee80211/ieee80211_tx.c30
-rw-r--r--net/ieee80211/ieee80211_wx.c152
-rw-r--r--net/socket.c9
521 files changed, 38674 insertions, 17876 deletions
diff --git a/.gitignore b/.gitignore
index 3f8fb686b59c..53e53f2791f8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -30,3 +30,5 @@ include/linux/autoconf.h
 include/linux/compile.h
 include/linux/version.h
 
+# stgit generated dirs
+patches-*
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index 1c955883cf58..2975291e296a 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -9,7 +9,7 @@
 DOCBOOKS := wanbook.xml z8530book.xml mcabook.xml videobook.xml \
 	    kernel-hacking.xml kernel-locking.xml deviceiobook.xml \
 	    procfs-guide.xml writing_usb_driver.xml \
-	    sis900.xml kernel-api.xml journal-api.xml lsm.xml usb.xml \
+	    kernel-api.xml journal-api.xml lsm.xml usb.xml \
 	    gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml
 
 ###
diff --git a/Documentation/DocBook/sis900.tmpl b/Documentation/DocBook/sis900.tmpl
deleted file mode 100644
index 6c2cbac93c3f..000000000000
--- a/Documentation/DocBook/sis900.tmpl
+++ /dev/null
@@ -1,585 +0,0 @@
1<?xml version="1.0" encoding="UTF-8"?>
2<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
3 "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
4
5<book id="SiS900Guide">
6
7<bookinfo>
8
9<title>SiS 900/7016 Fast Ethernet Device Driver</title>
10
11<authorgroup>
12<author>
13<firstname>Ollie</firstname>
14<surname>Lho</surname>
15</author>
16
17<author>
18<firstname>Lei Chun</firstname>
19<surname>Chang</surname>
20</author>
21</authorgroup>
22
23<edition>Document Revision: 0.3 for SiS900 driver v1.06 &amp; v1.07</edition>
24<pubdate>November 16, 2000</pubdate>
25
26<copyright>
27 <year>1999</year>
28 <holder>Silicon Integrated System Corp.</holder>
29</copyright>
30
31<legalnotice>
32 <para>
33 This program is free software; you can redistribute it and/or modify
34 it under the terms of the GNU General Public License as published by
35 the Free Software Foundation; either version 2 of the License, or
36 (at your option) any later version.
37 </para>
38
39 <para>
40 This program is distributed in the hope that it will be useful,
41 but WITHOUT ANY WARRANTY; without even the implied warranty of
42 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
43 GNU General Public License for more details.
44 </para>
45
46 <para>
47 You should have received a copy of the GNU General Public License
48 along with this program; if not, write to the Free Software
49 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
50 </para>
51</legalnotice>
52
53<abstract>
54<para>
55This document gives some information on installation and usage of SiS 900/7016
56device driver under Linux.
57</para>
58</abstract>
59
60</bookinfo>
61
62<toc></toc>
63
64<chapter id="intro">
65 <title>Introduction</title>
66
67<para>
68This document describes the revision 1.06 and 1.07 of SiS 900/7016 Fast Ethernet
69device driver under Linux. The driver is developed by Silicon Integrated
70System Corp. and distributed freely under the GNU General Public License (GPL).
71The driver can be compiled as a loadable module and used under Linux kernel
72version 2.2.x. (rev. 1.06)
73With minimal changes, the driver can also be used under 2.3.x and 2.4.x kernel
74(rev. 1.07), please see
75<xref linkend="install"/>. If you are intended to
76use the driver for earlier kernels, you are on your own.
77</para>
78
79<para>
80The driver is tested with usual TCP/IP applications including
81FTP, Telnet, Netscape etc. and is used constantly by the developers.
82</para>
83
84<para>
85Please send all comments/fixes/questions to
86<ulink url="mailto:lcchang@sis.com.tw">Lei-Chun Chang</ulink>.
87</para>
88</chapter>
89
90<chapter id="changes">
91 <title>Changes</title>
92
93<para>
94Changes made in Revision 1.07
95
96<orderedlist>
97<listitem>
98<para>
99Separation of sis900.c and sis900.h in order to move most
100constant definition to sis900.h (many of those constants were
101corrected)
102</para>
103</listitem>
104
105<listitem>
106<para>
107Clean up PCI detection, the pci-scan from Donald Becker were not used,
108just simple pci&lowbar;find&lowbar;*.
109</para>
110</listitem>
111
112<listitem>
113<para>
114MII detection is modified to support multiple mii transceiver.
115</para>
116</listitem>
117
118<listitem>
119<para>
120Bugs in read&lowbar;eeprom, mdio&lowbar;* were removed.
121</para>
122</listitem>
123
124<listitem>
125<para>
126Lot of sis900 irrelevant comments were removed/changed and
127more comments were added to reflect the real situation.
128</para>
129</listitem>
130
131<listitem>
132<para>
133Clean up of physical/virtual address space mess in buffer
134descriptors.
135</para>
136</listitem>
137
138<listitem>
139<para>
140Better transmit/receive error handling.
141</para>
142</listitem>
143
144<listitem>
145<para>
146The driver now uses zero-copy single buffer management
147scheme to improve performance.
148</para>
149</listitem>
150
151<listitem>
152<para>
153Names of variables were changed to be more consistent.
154</para>
155</listitem>
156
157<listitem>
158<para>
159Clean up of auo-negotiation and timer code.
160</para>
161</listitem>
162
163<listitem>
164<para>
165Automatic detection and change of PHY on the fly.
166</para>
167</listitem>
168
169<listitem>
170<para>
171Bug in mac probing fixed.
172</para>
173</listitem>
174
175<listitem>
176<para>
177Fix 630E equalier problem by modifying the equalizer workaround rule.
178</para>
179</listitem>
180
181<listitem>
182<para>
183Support for ICS1893 10/100 Interated PHYceiver.
184</para>
185</listitem>
186
187<listitem>
188<para>
189Support for media select by ifconfig.
190</para>
191</listitem>
192
193<listitem>
194<para>
195Added kernel-doc extratable documentation.
196</para>
197</listitem>
198
199</orderedlist>
200</para>
201</chapter>
202
203<chapter id="tested">
204 <title>Tested Environment</title>
205
206<para>
207This driver is developed on the following hardware
208
209<itemizedlist>
210<listitem>
211
212<para>
213Intel Celeron 500 with SiS 630 (rev 02) chipset
214</para>
215</listitem>
216<listitem>
217
218<para>
219SiS 900 (rev 01) and SiS 7016/7014 Fast Ethernet Card
220</para>
221</listitem>
222
223</itemizedlist>
224
225and tested with these software environments
226
227<itemizedlist>
228<listitem>
229
230<para>
231Red Hat Linux version 6.2
232</para>
233</listitem>
234<listitem>
235
236<para>
237Linux kernel version 2.4.0
238</para>
239</listitem>
240<listitem>
241
242<para>
243Netscape version 4.6
244</para>
245</listitem>
246<listitem>
247
248<para>
249NcFTP 3.0.0 beta 18
250</para>
251</listitem>
252<listitem>
253
254<para>
255Samba version 2.0.3
256</para>
257</listitem>
258
259</itemizedlist>
260
261</para>
262
263</chapter>
264
265<chapter id="files">
266<title>Files in This Package</title>
267
268<para>
269In the package you can find these files:
270</para>
271
272<para>
273<variablelist>
274
275<varlistentry>
276<term>sis900.c</term>
277<listitem>
278<para>
279Driver source file in C
280</para>
281</listitem>
282</varlistentry>
283
284<varlistentry>
285<term>sis900.h</term>
286<listitem>
287<para>
288Header file for sis900.c
289</para>
290</listitem>
291</varlistentry>
292
293<varlistentry>
294<term>sis900.sgml</term>
295<listitem>
296<para>
297DocBook SGML source of the document
298</para>
299</listitem>
300</varlistentry>
301
302<varlistentry>
303<term>sis900.txt</term>
304<listitem>
305<para>
306Driver document in plain text
307</para>
308</listitem>
309</varlistentry>
310
311</variablelist>
312</para>
313</chapter>
314
315<chapter id="install">
316 <title>Installation</title>
317
318<para>
319Silicon Integrated System Corp. is cooperating closely with core Linux Kernel
320developers. The revisions of SiS 900 driver are distributed by the usuall channels
321for kernel tar files and patches. Those kernel tar files for official kernel and
322patches for kernel pre-release can be download at
323<ulink url="http://ftp.kernel.org/pub/linux/kernel/">official kernel ftp site</ulink>
324and its mirrors.
325The 1.06 revision can be found in kernel version later than 2.3.15 and pre-2.2.14,
326and 1.07 revision can be found in kernel version 2.4.0.
327If you have no prior experience in networking under Linux, please read
328<ulink url="http://www.tldp.org/">Ethernet HOWTO</ulink> and
329<ulink url="http://www.tldp.org/">Networking HOWTO</ulink> available from
330Linux Documentation Project (LDP).
331</para>
332
333<para>
334The driver is bundled in release later than 2.2.11 and 2.3.15 so this
335is the most easy case.
336Be sure you have the appropriate packages for compiling kernel source.
337Those packages are listed in Document/Changes in kernel source
338distribution. If you have to install the driver other than those bundled
339in kernel release, you should have your driver file
340<filename>sis900.c</filename> and <filename>sis900.h</filename>
341copied into <filename class="directory">/usr/src/linux/drivers/net/</filename> first.
342There are two alternative ways to install the driver
343</para>
344
345<sect1>
346<title>Building the driver as loadable module</title>
347
348<para>
349To build the driver as a loadable kernel module you have to reconfigure
350the kernel to activate network support by
351</para>
352
353<para><screen>
354make menuconfig
355</screen></para>
356
357<para>
358Choose <quote>Loadable module support ---></quote>,
359then select <quote>Enable loadable module support</quote>.
360</para>
361
362<para>
363Choose <quote>Network Device Support ---></quote>, select
364<quote>Ethernet (10 or 100Mbit)</quote>.
365Then select <quote>EISA, VLB, PCI and on board controllers</quote>,
366and choose <quote>SiS 900/7016 PCI Fast Ethernet Adapter support</quote>
367to <quote>M</quote>.
368</para>
369
370<para>
371After reconfiguring the kernel, you can make the driver module by
372</para>
373
374<para><screen>
375make modules
376</screen></para>
377
378<para>
379The driver should be compiled with no errors. After compiling the driver,
380the driver can be installed to proper place by
381</para>
382
383<para><screen>
384make modules_install
385</screen></para>
386
387<para>
388Load the driver into kernel by
389</para>
390
391<para><screen>
392insmod sis900
393</screen></para>
394
395<para>
396When loading the driver into memory, some information message can be view by
397</para>
398
399<para>
400<screen>
401dmesg
402</screen>
403
404or
405
406<screen>
407cat /var/log/message
408</screen>
409</para>
410
411<para>
412If the driver is loaded properly you will have messages similar to this:
413</para>
414
415<para><screen>
416sis900.c: v1.07.06 11/07/2000
417eth0: SiS 900 PCI Fast Ethernet at 0xd000, IRQ 10, 00:00:e8:83:7f:a4.
418eth0: SiS 900 Internal MII PHY transceiver found at address 1.
419eth0: Using SiS 900 Internal MII PHY as default
420</screen></para>
421
422<para>
423showing the version of the driver and the results of probing routine.
424</para>
425
426<para>
427Once the driver is loaded, network can be brought up by
428</para>
429
430<para><screen>
431/sbin/ifconfig eth0 IPADDR broadcast BROADCAST netmask NETMASK media TYPE
432</screen></para>
433
434<para>
435where IPADDR, BROADCAST, NETMASK are your IP address, broadcast address and
436netmask respectively. TYPE is used to set medium type used by the device.
437Typical values are "10baseT"(twisted-pair 10Mbps Ethernet) or "100baseT"
438(twisted-pair 100Mbps Ethernet). For more information on how to configure
439network interface, please refer to
440<ulink url="http://www.tldp.org/">Networking HOWTO</ulink>.
441</para>
442
443<para>
444The link status is also shown by kernel messages. For example, after the
445network interface is activated, you may have the message:
446</para>
447
448<para><screen>
449eth0: Media Link On 100mbps full-duplex
450</screen></para>
451
452<para>
453If you try to unplug the twist pair (TP) cable you will get
454</para>
455
456<para><screen>
457eth0: Media Link Off
458</screen></para>
459
460<para>
461indicating that the link is failed.
462</para>
463</sect1>
464
465<sect1>
466<title>Building the driver into kernel</title>
467
468<para>
469If you want to make the driver into kernel, choose <quote>Y</quote>
470rather than <quote>M</quote> on
471<quote>SiS 900/7016 PCI Fast Ethernet Adapter support</quote>
472when configuring the kernel. Build the kernel image in the usual way
473</para>
474
475<para><screen>
476make clean
477
478make bzlilo
479</screen></para>
480
481<para>
482Next time the system reboot, you have the driver in memory.
483</para>
484
485</sect1>
486</chapter>
487
488<chapter id="problems">
489 <title>Known Problems and Bugs</title>
490
491<para>
492There are some known problems and bugs. If you find any other bugs please
493mail to <ulink url="mailto:lcchang@sis.com.tw">lcchang@sis.com.tw</ulink>
494
495<orderedlist>
496
497<listitem>
498<para>
499AM79C901 HomePNA PHY is not thoroughly tested, there may be some
500bugs in the <quote>on the fly</quote> change of transceiver.
501</para>
502</listitem>
503
504<listitem>
505<para>
506A bug is hidden somewhere in the receive buffer management code,
507the bug causes NULL pointer reference in the kernel. This fault is
508caught before bad things happen and reported with the message:
509
510<computeroutput>
511eth0: NULL pointer encountered in Rx ring, skipping
512</computeroutput>
513
514which can be viewed with <literal remap="tt">dmesg</literal> or
515<literal remap="tt">cat /var/log/message</literal>.
516</para>
517</listitem>
518
519<listitem>
520<para>
521The media type change from 10Mbps to 100Mbps twisted-pair ethernet
522by ifconfig causes the media link down.
523</para>
524</listitem>
525
526</orderedlist>
527</para>
528</chapter>
529
530<chapter id="RHistory">
531 <title>Revision History</title>
532
533<para>
534<itemizedlist>
535
536<listitem>
537<para>
538November 13, 2000, Revision 1.07, seventh release, 630E problem fixed
539and further clean up.
540</para>
541</listitem>
542
543<listitem>
544<para>
545November 4, 1999, Revision 1.06, Second release, lots of clean up
546and optimization.
547</para>
548</listitem>
549
550<listitem>
551<para>
552August 8, 1999, Revision 1.05, Initial Public Release
553</para>
554</listitem>
555
556</itemizedlist>
557</para>
558</chapter>
559
560<chapter id="acknowledgements">
561 <title>Acknowledgements</title>
562
563<para>
564This driver was originally derived form
565<ulink url="mailto:becker@cesdis1.gsfc.nasa.gov">Donald Becker</ulink>'s
566<ulink url="ftp://cesdis.gsfc.nasa.gov/pub/linux/drivers/kern-2.3/pci-skeleton.c"
567>pci-skeleton</ulink> and
568<ulink url="ftp://cesdis.gsfc.nasa.gov/pub/linux/drivers/kern-2.3/rtl8139.c"
569>rtl8139</ulink> drivers. Donald also provided various suggestion
570regarded with improvements made in revision 1.06.
571</para>
572
573<para>
574The 1.05 revision was created by
575<ulink url="mailto:cmhuang@sis.com.tw">Jim Huang</ulink>, AMD 79c901
576support was added by <ulink url="mailto:lcs@sis.com.tw">Chin-Shan Li</ulink>.
577</para>
578</chapter>
579
580<chapter id="functions">
581<title>List of Functions</title>
582!Idrivers/net/sis900.c
583</chapter>
584
585</book>
diff --git a/Documentation/dvb/avermedia.txt b/Documentation/dvb/avermedia.txt
index 068070ff13cd..8bab8461a4af 100644
--- a/Documentation/dvb/avermedia.txt
+++ b/Documentation/dvb/avermedia.txt
@@ -1,4 +1,3 @@
-
 HOWTO: Get An Avermedia DVB-T working under Linux
 	      ______________________________________________
 
@@ -137,11 +136,8 @@ Getting the card going
 	To power up the card, load the following modules in the
 	following order:
 
-	       * insmod dvb-core.o
-	       * modprobe bttv.o
-	       * insmod bt878.o
-	       * insmod dvb-bt8xx.o
-	       * insmod sp887x.o
+	       * modprobe bttv (normally loaded automatically)
+	       * modprobe dvb-bt8xx (or place dvb-bt8xx in /etc/modules)
 
 	Insertion of these modules into the running kernel will
 	activate the appropriate DVB device nodes. It is then possible
@@ -302,4 +298,4 @@ Further Update
 	Many thanks to Nigel Pearson for the updates to this document
 	since the recent revision of the driver.
 
-	January 29th 2004
+	February 14th 2006
diff --git a/Documentation/dvb/bt8xx.txt b/Documentation/dvb/bt8xx.txt
index 52ed462061df..4e7614e606c5 100644
--- a/Documentation/dvb/bt8xx.txt
+++ b/Documentation/dvb/bt8xx.txt
@@ -1,118 +1,78 @@
1How to get the Nebula, PCTV, FusionHDTV Lite and Twinhan DST cards working 1How to get the bt8xx cards working
2========================================================================== 2==================================
3 3
4This class of cards has a bt878a as the PCI interface, and 41) General information
5require the bttv driver. 5======================
6 6
7Please pay close attention to the warning about the bttv module 7This class of cards has a bt878a as the PCI interface, and require the bttv driver
8options below for the DST card. 8for accessing the i2c bus and the gpio pins of the bt8xx chipset.
9Please see Documentation/dvb/cards.txt => o Cards based on the Conexant Bt8xx PCI bridge:
9 10
101) General informations 11Compiling kernel please enable:
11======================= 12a.)"Device drivers" => "Multimedia devices" => "Video For Linux" => "BT848 Video For Linux"
12 13b.)"Device drivers" => "Multimedia devices" => "Digital Video Broadcasting Devices"
13These drivers require the bttv driver to provide the means to access 14 => "DVB for Linux" "DVB Core Support" "Bt8xx based PCI Cards"
14the i2c bus and the gpio pins of the bt8xx chipset.
15
16Because of this, you need to enable
17"Device drivers" => "Multimedia devices"
18 => "Video For Linux" => "BT848 Video For Linux"
19
20Furthermore you need to enable
21"Device drivers" => "Multimedia devices" => "Digital Video Broadcasting Devices"
22 => "DVB for Linux" "DVB Core Support" "BT8xx based PCI cards"
23 15
242) Loading Modules 162) Loading Modules
25================== 17==================
26 18
27In general you need to load the bttv driver, which will handle the gpio and 19In default cases bttv is loaded automatically.
28i2c communication for us, plus the common dvb-bt8xx device driver. 20To load the backend either place dvb-bt8xx in etc/modules, or apply manually:
29The frontends for Nebula (nxt6000), Pinnacle PCTV (cx24110), TwinHan (dst),
30FusionHDTV DVB-T Lite (mt352) and FusionHDTV5 Lite (lgdt330x) are loaded
31automatically by the dvb-bt8xx device driver.
32
333a) Nebula / Pinnacle PCTV / FusionHDTV Lite
34---------------------------------------------
35
36 $ modprobe bttv (normally bttv is being loaded automatically by kmod)
37 $ modprobe dvb-bt8xx
38
39(or just place dvb-bt8xx in /etc/modules for automatic loading)
40
41
423b) TwinHan and Clones
43--------------------------
44 21
45 $ modprobe bttv card=0x71 22 $ modprobe dvb-bt8xx
46 $ modprobe dvb-bt8xx
47 $ modprobe dst
48 23
49The value 0x71 will override the PCI type detection for dvb-bt8xx, 24All frontends will be loaded automatically.
50which is necessary for TwinHan cards. Omission of this parameter might result 25People running udev please see Documentation/dvb/udev.txt.
51in a system lockup.
52 26
53If you're having an older card (blue color PCB) and card=0x71 locks up 27In the following cases overriding the PCI type detection for dvb-bt8xx might be necessary:
54your machine, try using 0x68, too. If that does not work, ask on the
55mailing list.
56 28
57The DST module takes a couple of useful parameters. 292a) Running TwinHan and Clones
30------------------------------
58 31
59verbose takes values 0 to 4. These values control the verbosity level, 32 $ modprobe bttv card=113
60and can be used to debug also. 33 $ modprobe dvb-bt8xx
34 $ modprobe dst
61 35
62verbose=0 means complete disabling of messages 36Useful parameters for verbosity level and debugging the dst module:
63 1 only error messages are displayed
64 2 notifications are also displayed
65 3 informational messages are also displayed
66 4 debug setting
67 37
68dst_addons takes values 0 and 0x20. A value of 0 means it is a FTA card. 38verbose=0: messages are disabled
690x20 means it has a Conditional Access slot. 39 1: only error messages are displayed
40 2: notifications are displayed
41 3: other useful messages are displayed
42 4: debug setting
43dst_addons=0: card is a free to air (FTA) card only
44 0x20: card has a conditional access slot for scrambled channels
70 45
71The autodetected values are determined by the cards 'response string' 46The autodetected values are determined by the cards' "response string".
72which you can see in your logs e.g. 47In your logs see f. ex.: dst_get_device_id: Recognize [DSTMCI].
48For bug reports please send in a complete log with verbose=4 activated.
49Please also see Documentation/dvb/ci.txt.
73 50
74dst_get_device_id: Recognise [DSTMCI] 512b) Running multiple cards
75
76If you need to sent in bug reports on the dst, please do send in a complete
77log with the verbose=4 module parameter. For general usage, the default setting
78of verbose=1 is ideal.
79
80
814) Multiple cards
82-------------------------- 52--------------------------
83 53
84If you happen to be running multiple cards, it would be advisable to load 54Examples of card ID's:
85the bttv module with the card id. This would help to solve any module loading
86problems that you might face.
87
88For example, if you have a Twinhan and Clones card along with a FusionHDTV5 Lite
89 55
90 $ modprobe bttv card=0x71 card=0x87 56Pinnacle PCTV Sat: 94
91 57Nebula Electronics Digi TV: 104
92Here the order of the card id is important and should be the same as that of the 58pcHDTV HD-2000 TV: 112
93physical order of the cards. Here card=0x71 represents the Twinhan and clones 59Twinhan DST and clones: 113
94and card=0x87 represents Fusion HDTV5 Lite. These arguments can also be 60Avermedia AverTV DVB-T 771: 123
95specified in decimal, rather than hex: 61Avermedia AverTV DVB-T 761: 124
62DViCO FusionHDTV DVB-T Lite: 128
63DViCO FusionHDTV 5 Lite: 135
96 64
65Notice: The order of the card ID should be uprising:
66Example:
97 $ modprobe bttv card=113 card=135 67 $ modprobe bttv card=113 card=135
68 $ modprobe dvb-bt8xx
98 69
99Some examples of card-id's 70For a full list of card ID's please see Documentation/video4linux/CARDLIST.bttv.
100 71In case of further problems send questions to the mailing list: www.linuxdvb.org.
101Pinnacle Sat 0x5e (94)
102Nebula Digi TV 0x68 (104)
103PC HDTV 0x70 (112)
104Twinhan 0x71 (113)
105FusionHDTV DVB-T Lite 0x80 (128)
106FusionHDTV5 Lite 0x87 (135)
107
108For a full list of card-id's, see the V4L Documentation within the kernel
109source: linux/Documentation/video4linux/CARDLIST.bttv
110
111If you have problems with this please do ask on the mailing list.
112 72
113--
114Authors: Richard Walker, 73Authors: Richard Walker,
115 Jamie Honan, 74 Jamie Honan,
116 Michael Hunold, 75 Michael Hunold,
117 Manu Abraham, 76 Manu Abraham,
77 Uwe Bugla,
118 Michael Krufky 78 Michael Krufky
diff --git a/Documentation/dvb/get_dvb_firmware b/Documentation/dvb/get_dvb_firmware
index 75c28a174092..bb55f49f2745 100644
--- a/Documentation/dvb/get_dvb_firmware
+++ b/Documentation/dvb/get_dvb_firmware
@@ -21,8 +21,9 @@
 use File::Temp qw/ tempdir /;
 use IO::Handle;
 
-@components = ( "sp8870", "sp887x", "tda10045", "tda10046", "av7110", "dec2000t",
-		"dec2540t", "dec3000s", "vp7041", "dibusb", "nxt2002", "nxt2004",
+@components = ( "sp8870", "sp887x", "tda10045", "tda10046",
+		"tda10046lifeview", "av7110", "dec2000t", "dec2540t",
+		"dec3000s", "vp7041", "dibusb", "nxt2002", "nxt2004",
 		"or51211", "or51132_qam", "or51132_vsb", "bluebird");
 
 # Check args
@@ -126,6 +127,24 @@ sub tda10046 {
     $outfile;
 }
 
+sub tda10046lifeview {
+    my $sourcefile = "Drv_2.11.02.zip";
+    my $url = "http://www.lifeview.com.tw/drivers/pci_card/FlyDVB-T/$sourcefile";
+    my $hash = "1ea24dee4eea8fe971686981f34fd2e0";
+    my $outfile = "dvb-fe-tda10046.fw";
+    my $tmpdir = tempdir(DIR => "/tmp", CLEANUP => 1);
+
+    checkstandard();
+
+    wgetfile($sourcefile, $url);
+    unzip($sourcefile, $tmpdir);
+    extract("$tmpdir/LVHybrid.sys", 0x8b088, 24602, "$tmpdir/fwtmp");
+    verify("$tmpdir/fwtmp", $hash);
+    copy("$tmpdir/fwtmp", $outfile);
+
+    $outfile;
+}
+
 sub av7110 {
     my $sourcefile = "dvb-ttpci-01.fw-261d";
     my $url = "http://www.linuxtv.org/downloads/firmware/$sourcefile";
diff --git a/Documentation/dvb/readme.txt b/Documentation/dvb/readme.txt
index f5c50b22de3b..0b0380c91990 100644
--- a/Documentation/dvb/readme.txt
+++ b/Documentation/dvb/readme.txt
@@ -20,11 +20,23 @@ http://linuxtv.org/downloads/
 
 What's inside this directory:
 
+"avermedia.txt"
+contains detailed information about the
+Avermedia DVB-T cards. See also "bt8xx.txt".
+
+"bt8xx.txt"
+contains detailed information about the
+various bt8xx based "budget" DVB cards.
+
 "cards.txt"
 contains a list of supported hardware.
 
+"ci.txt"
+contains detailed information about the
+CI module as part from TwinHan cards and Clones.
+
 "contributors.txt"
-is the who-is-who of DVB development
+is the who-is-who of DVB development.
 
 "faq.txt"
 contains frequently asked questions and their answers.
@@ -34,19 +46,17 @@ script to download and extract firmware for those devices
 that require it.
 
 "ttusb-dec.txt"
-contains detailed informations about the
+contains detailed information about the
 TT DEC2000/DEC3000 USB DVB hardware.
 
-"bt8xx.txt"
-contains detailed installation instructions for the
-various bt8xx based "budget" DVB cards
-(Nebula, Pinnacle PCTV, Twinhan DST)
-
-"README.dibusb"
-contains detailed information about adapters
-based on DiBcom reference design.
-
 "udev.txt"
 how to get DVB and udev up and running.
 
+"README.dvb-usb"
+contains detailed information about the DVB USB cards.
+
+"README.flexcop"
+contains detailed information about the
+Technisat- and Flexcop B2C2 drivers.
+
 Good luck and have fun!
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 81bc51369f59..28a31c5e2289 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -151,6 +151,13 @@ Who: Ralf Baechle <ralf@linux-mips.org>
 
 ---------------------------
 
+What:	eepro100 network driver
+When:	January 2007
+Why:	replaced by the e100 driver
+Who:	Adrian Bunk <bunk@stusta.de>
+
+---------------------------
+
 What:	Legacy /proc/pci interface (PCI_LEGACY_PROC)
 When:	March 2006
 Why:	deprecated since 2.5.53 in favor of lspci(8)
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index 5b01d5cc4e95..b1181ce232d9 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -92,8 +92,6 @@ routing.txt
 	- the new routing mechanism
 shaper.txt
 	- info on the module that can shape/limit transmitted traffic.
-sis900.txt
-	- SiS 900/7016 Fast Ethernet device driver info.
 sk98lin.txt
 	- Marvell Yukon Chipset / SysKonnect SK-98xx compliant Gigabit
 	  Ethernet Adapter family driver info
diff --git a/Documentation/networking/README.ipw2100 b/Documentation/networking/README.ipw2100
index 3ab40379d1cf..f3fcaa41f774 100644
--- a/Documentation/networking/README.ipw2100
+++ b/Documentation/networking/README.ipw2100
@@ -3,18 +3,18 @@ Intel(R) PRO/Wireless 2100 Driver for Linux in support of:
 
 Intel(R) PRO/Wireless 2100 Network Connection
 
-Copyright (C) 2003-2005, Intel Corporation
+Copyright (C) 2003-2006, Intel Corporation
 
 README.ipw2100
 
-Version: 1.1.3
-Date   : October 17, 2005
+Version: git-1.1.5
+Date   : January 25, 2006
 
 Index
 -----------------------------------------------
 0. IMPORTANT INFORMATION BEFORE USING THIS DRIVER
 1. Introduction
-2. Release 1.1.3 Current Features
+2. Release git-1.1.5 Current Features
 3. Command Line Parameters
 4. Sysfs Helper Files
 5. Radio Kill Switch
@@ -89,7 +89,7 @@ potential fixes and patches, as well as links to the development mailing list
 for the driver project.
 
 
-2. Release 1.1.3 Current Supported Features
+2. Release git-1.1.5 Current Supported Features
 -----------------------------------------------
 - Managed (BSS) and Ad-Hoc (IBSS)
 - WEP (shared key and open)
@@ -270,7 +270,7 @@ For installation support on the ipw2100 1.1.0 driver on Linux kernels
 9. License
 -----------------------------------------------
 
-    Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved.
+    Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
 
     This program is free software; you can redistribute it and/or modify it
     under the terms of the GNU General Public License (version 2) as
diff --git a/Documentation/networking/README.ipw2200 b/Documentation/networking/README.ipw2200
index c6492d3839fa..acb30c5dcff3 100644
--- a/Documentation/networking/README.ipw2200
+++ b/Documentation/networking/README.ipw2200
@@ -10,7 +10,7 @@ both hardware adapters listed above. In this document the Intel(R)
 PRO/Wireless 2915ABG Driver for Linux will be used to reference the
 unified driver.
 
-Copyright (C) 2004-2005, Intel Corporation
+Copyright (C) 2004-2006, Intel Corporation
 
 README.ipw2200
 
@@ -26,9 +26,11 @@ Index
 1.2. Module parameters
 1.3. Wireless Extension Private Methods
 1.4. Sysfs Helper Files
+1.5. Supported channels
 2. Ad-Hoc Networking
 3. Interacting with Wireless Tools
 3.1. iwconfig mode
+3.2. iwconfig sens
 4. About the Version Numbers
 5. Firmware installation
 6. Support
@@ -314,6 +316,35 @@ For the device level files, see /sys/bus/pci/drivers/ipw2200:
 	running ifconfig and is therefore disabled by default.
 
 
+1.5. Supported channels
+-----------------------------------------------
+
+Upon loading the Intel(R) PRO/Wireless 2915ABG Driver for Linux, a
+message stating the detected geography code and the number of 802.11
+channels supported by the card will be displayed in the log.
+
+The geography code corresponds to a regulatory domain as shown in the
+table below.
+
+	  Supported channels
+Code	Geography		802.11bg	802.11a
+
+---	Restricted			11	 0
+ZZF	Custom US/Canada		11	 8
+ZZD	Rest of World			13	 0
+ZZA	Custom USA & Europe & High	11	13
+ZZB	Custom NA & Europe		11	13
+ZZC	Custom Japan			11	 4
+ZZM	Custom				11	 0
+ZZE	Europe				13	19
+ZZJ	Custom Japan			14	 4
+ZZR	Rest of World			14	 0
+ZZH	High Band			13	 4
+ZZG	Custom Europe			13	 4
+ZZK	Europe				13	24
+ZZL	Europe				11	13
+
+
 2. Ad-Hoc Networking
 -----------------------------------------------
 
@@ -353,6 +384,15 @@ When configuring the mode of the adapter, all run-time configured parameters
 are reset to the value used when the module was loaded. This includes
 channels, rates, ESSID, etc.
 
+3.2 iwconfig sens
+-----------------------------------------------
+
+The 'iwconfig ethX sens XX' command will not set the signal sensitivity
+threshold, as described in iwconfig documentation, but rather the number
+of consecutive missed beacons that will trigger handover, i.e. roaming
+to another access point. At the same time, it will set the disassociation
+threshold to 3 times the given value.
+
 
 4. About the Version Numbers
 -----------------------------------------------
@@ -408,7 +448,7 @@ For general information and support, go to:
 7. License
 -----------------------------------------------
 
-    Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved.
+    Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
 
     This program is free software; you can redistribute it and/or modify it
     under the terms of the GNU General Public License version 2 as
diff --git a/Documentation/networking/sis900.txt b/Documentation/networking/sis900.txt
deleted file mode 100644
index bddffd7385ae..000000000000
--- a/Documentation/networking/sis900.txt
+++ /dev/null
@@ -1,257 +0,0 @@
1
2SiS 900/7016 Fast Ethernet Device Driver
3
4Ollie Lho
5
6Lei Chun Chang
7
8 Copyright © 1999 by Silicon Integrated System Corp.
9
10 This document gives some information on installation and usage of SiS
11 900/7016 device driver under Linux.
12
13 This program is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2 of the License, or (at
16 your option) any later version.
17
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
25 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
26 USA
27 _________________________________________________________________
28
29 Table of Contents
30 1. Introduction
31 2. Changes
32 3. Tested Environment
33 4. Files in This Package
34 5. Installation
35
36 Building the driver as loadable module
37 Building the driver into kernel
38
39 6. Known Problems and Bugs
40 7. Revision History
41 8. Acknowledgements
42 _________________________________________________________________
43
44Chapter 1. Introduction
45
46 This document describes the revision 1.06 and 1.07 of SiS 900/7016
47 Fast Ethernet device driver under Linux. The driver is developed by
48 Silicon Integrated System Corp. and distributed freely under the GNU
49 General Public License (GPL). The driver can be compiled as a loadable
 50 module and used under Linux kernel version 2.2.x (rev. 1.06). With
51 minimal changes, the driver can also be used under 2.3.x and 2.4.x
 52 kernels (rev. 1.07); please see Chapter 5. If you intend to use
53 the driver for earlier kernels, you are on your own.
54
55 The driver is tested with usual TCP/IP applications including FTP,
56 Telnet, Netscape etc. and is used constantly by the developers.
57
58 Please send all comments/fixes/questions to Lei-Chun Chang.
59 _________________________________________________________________
60
61Chapter 2. Changes
62
63 Changes made in Revision 1.07
64
65 1. Separation of sis900.c and sis900.h in order to move most constant
 66 definitions to sis900.h (many of those constants were corrected)
 67 2. Clean up PCI detection; the pci-scan from Donald Becker was not
 68 used, just simple pci_find_*.
 69 3. MII detection is modified to support multiple mii transceivers.
70 4. Bugs in read_eeprom, mdio_* were removed.
 71 5. Lots of comments irrelevant to sis900 were removed/changed and more
72 comments were added to reflect the real situation.
73 6. Clean up of physical/virtual address space mess in buffer
74 descriptors.
75 7. Better transmit/receive error handling.
76 8. The driver now uses zero-copy single buffer management scheme to
77 improve performance.
78 9. Names of variables were changed to be more consistent.
 79 10. Clean up of auto-negotiation and timer code.
80 11. Automatic detection and change of PHY on the fly.
81 12. Bug in mac probing fixed.
 82 13. Fix 630E equalizer problem by modifying the equalizer workaround
83 rule.
 84 14. Support for ICS1893 10/100 Integrated PHYceiver.
85 15. Support for media select by ifconfig.
 86 16. Added kernel-doc extractable documentation.
87 _________________________________________________________________
88
89Chapter 3. Tested Environment
90
91 This driver is developed on the following hardware
92
93 * Intel Celeron 500 with SiS 630 (rev 02) chipset
94 * SiS 900 (rev 01) and SiS 7016/7014 Fast Ethernet Card
95
96 and tested with these software environments
97
98 * Red Hat Linux version 6.2
99 * Linux kernel version 2.4.0
100 * Netscape version 4.6
101 * NcFTP 3.0.0 beta 18
102 * Samba version 2.0.3
103 _________________________________________________________________
104
105Chapter 4. Files in This Package
106
107 In the package you can find these files:
108
109 sis900.c
110 Driver source file in C
111
112 sis900.h
113 Header file for sis900.c
114
115 sis900.sgml
116 DocBook SGML source of the document
117
118 sis900.txt
119 Driver document in plain text
120 _________________________________________________________________
121
122Chapter 5. Installation
123
124 Silicon Integrated System Corp. is cooperating closely with core Linux
125 Kernel developers. The revisions of SiS 900 driver are distributed by
 126 the usual channels for kernel tar files and patches. Those kernel tar
 127 files for official kernels and patches for kernel pre-releases can be
 128 downloaded from the official kernel ftp site and its mirrors. The 1.06
129 revision can be found in kernel version later than 2.3.15 and
130 pre-2.2.14, and 1.07 revision can be found in kernel version 2.4.0. If
131 you have no prior experience in networking under Linux, please read
132 Ethernet HOWTO and Networking HOWTO available from Linux Documentation
133 Project (LDP).
134
 135 The driver is bundled in releases later than 2.2.11 and 2.3.15, so this
 136 is the easiest case. Be sure you have the appropriate packages for
137 compiling kernel source. Those packages are listed in Document/Changes
138 in kernel source distribution. If you have to install the driver other
139 than those bundled in kernel release, you should have your driver file
140 sis900.c and sis900.h copied into /usr/src/linux/drivers/net/ first.
141 There are two alternative ways to install the driver
142 _________________________________________________________________
143
144Building the driver as loadable module
145
146 To build the driver as a loadable kernel module you have to
147 reconfigure the kernel to activate network support by
148
149make menuconfig
150
151 Choose "Loadable module support --->", then select "Enable loadable
152 module support".
153
154 Choose "Network Device Support --->", select "Ethernet (10 or
155 100Mbit)". Then select "EISA, VLB, PCI and on board controllers", and
156 choose "SiS 900/7016 PCI Fast Ethernet Adapter support" to "M".
157
158 After reconfiguring the kernel, you can make the driver module by
159
160make modules
161
162 The driver should be compiled with no errors. After compiling the
 163 driver, the driver can be installed to the proper place by
164
165make modules_install
166
167 Load the driver into kernel by
168
169insmod sis900
170
 171 When loading the driver into memory, some informational messages can be
 172 viewed by
173
174dmesg
175
176 or
177cat /var/log/message
178
179 If the driver is loaded properly you will have messages similar to
180 this:
181
182sis900.c: v1.07.06 11/07/2000
183eth0: SiS 900 PCI Fast Ethernet at 0xd000, IRQ 10, 00:00:e8:83:7f:a4.
184eth0: SiS 900 Internal MII PHY transceiver found at address 1.
185eth0: Using SiS 900 Internal MII PHY as default
186
 187 showing the version of the driver and the results of the probing routine.
188
189 Once the driver is loaded, network can be brought up by
190
191/sbin/ifconfig eth0 IPADDR broadcast BROADCAST netmask NETMASK media TYPE
192
193 where IPADDR, BROADCAST, NETMASK are your IP address, broadcast
194 address and netmask respectively. TYPE is used to set medium type used
195 by the device. Typical values are "10baseT"(twisted-pair 10Mbps
196 Ethernet) or "100baseT" (twisted-pair 100Mbps Ethernet). For more
197 information on how to configure network interface, please refer to
198 Networking HOWTO.
199
200 The link status is also shown by kernel messages. For example, after
201 the network interface is activated, you may have the message:
202
203eth0: Media Link On 100mbps full-duplex
204
 205 If you unplug the twisted-pair (TP) cable you will get
206
207eth0: Media Link Off
208
 209 indicating that the link has failed.
210 _________________________________________________________________
211
212Building the driver into kernel
213
 214 If you want to build the driver into the kernel, choose "Y" rather than "M"
215 on "SiS 900/7016 PCI Fast Ethernet Adapter support" when configuring
216 the kernel. Build the kernel image in the usual way
217
218make clean
219
220make bzlilo
221
 222 The next time the system reboots, the driver will be in memory.
223 _________________________________________________________________
224
225Chapter 6. Known Problems and Bugs
226
227 There are some known problems and bugs. If you find any other bugs
228 please mail to lcchang@sis.com.tw
229
 230 1. AM79C901 HomePNA PHY is not thoroughly tested; there may be some
231 bugs in the "on the fly" change of transceiver.
 232 2. A bug is hidden somewhere in the receive buffer management code;
 233 the bug causes a NULL pointer reference in the kernel. This fault is
234 caught before bad things happen and reported with the message:
235 eth0: NULL pointer encountered in Rx ring, skipping which can be
236 viewed with dmesg or cat /var/log/message.
237 3. The media type change from 10Mbps to 100Mbps twisted-pair ethernet
 238 by ifconfig causes the media link to go down.
239 _________________________________________________________________
240
241Chapter 7. Revision History
242
243 * November 13, 2000, Revision 1.07, seventh release, 630E problem
244 fixed and further clean up.
245 * November 4, 1999, Revision 1.06, Second release, lots of clean up
246 and optimization.
247 * August 8, 1999, Revision 1.05, Initial Public Release
248 _________________________________________________________________
249
250Chapter 8. Acknowledgements
251
 252 This driver was originally derived from Donald Becker's pci-skeleton
 253 and rtl8139 drivers. Donald also provided various suggestions regarding
 254 the improvements made in revision 1.06.
255
256 The 1.05 revision was created by Jim Huang, AMD 79c901 support was
257 added by Chin-Shan Li.
diff --git a/Documentation/video4linux/CARDLIST.cx88 b/Documentation/video4linux/CARDLIST.cx88
index 8bea3fbd0548..3b39a91b24bd 100644
--- a/Documentation/video4linux/CARDLIST.cx88
+++ b/Documentation/video4linux/CARDLIST.cx88
@@ -43,3 +43,5 @@
43 42 -> digitalnow DNTV Live! DVB-T Pro [1822:0025] 43 42 -> digitalnow DNTV Live! DVB-T Pro [1822:0025]
44 43 -> KWorld/VStream XPert DVB-T with cx22702 [17de:08a1] 44 43 -> KWorld/VStream XPert DVB-T with cx22702 [17de:08a1]
45 44 -> DViCO FusionHDTV DVB-T Dual Digital [18ac:db50,18ac:db54] 45 44 -> DViCO FusionHDTV DVB-T Dual Digital [18ac:db50,18ac:db54]
46 45 -> KWorld HardwareMpegTV XPert [17de:0840]
47 46 -> DViCO FusionHDTV DVB-T Hybrid [18ac:db40,18ac:db44]
diff --git a/Documentation/video4linux/CARDLIST.em28xx b/Documentation/video4linux/CARDLIST.em28xx
index a0c7cad20971..a3026689bbe6 100644
--- a/Documentation/video4linux/CARDLIST.em28xx
+++ b/Documentation/video4linux/CARDLIST.em28xx
@@ -8,3 +8,4 @@
8 7 -> Leadtek Winfast USB II (em2800) 8 7 -> Leadtek Winfast USB II (em2800)
9 8 -> Kworld USB2800 (em2800) 9 8 -> Kworld USB2800 (em2800)
10 9 -> Pinnacle Dazzle DVC 90 (em2820/em2840) [2304:0207] 10 9 -> Pinnacle Dazzle DVC 90 (em2820/em2840) [2304:0207]
11 12 -> Kworld PVR TV 2800 RF (em2820/em2840)
diff --git a/Documentation/video4linux/CARDLIST.saa7134 b/Documentation/video4linux/CARDLIST.saa7134
index da4fb890165f..8c7195455963 100644
--- a/Documentation/video4linux/CARDLIST.saa7134
+++ b/Documentation/video4linux/CARDLIST.saa7134
@@ -83,3 +83,12 @@
83 82 -> MSI TV@Anywhere plus [1462:6231] 83 82 -> MSI TV@Anywhere plus [1462:6231]
84 83 -> Terratec Cinergy 250 PCI TV [153b:1160] 84 83 -> Terratec Cinergy 250 PCI TV [153b:1160]
85 84 -> LifeView FlyDVB Trio [5168:0319] 85 84 -> LifeView FlyDVB Trio [5168:0319]
86 85 -> AverTV DVB-T 777 [1461:2c05]
87 86 -> LifeView FlyDVB-T [5168:0301]
88 87 -> ADS Instant TV Duo Cardbus PTV331 [0331:1421]
89 88 -> Tevion/KWorld DVB-T 220RF [17de:7201]
90 89 -> ELSA EX-VISION 700TV [1048:226c]
91 90 -> Kworld ATSC110 [17de:7350]
92 91 -> AVerMedia A169 B [1461:7360]
93 92 -> AVerMedia A169 B1 [1461:6360]
94 93 -> Medion 7134 Bridge #2 [16be:0005]
diff --git a/Documentation/video4linux/CARDLIST.tuner b/Documentation/video4linux/CARDLIST.tuner
index f6d0cf7b7922..1bcdac67dd8c 100644
--- a/Documentation/video4linux/CARDLIST.tuner
+++ b/Documentation/video4linux/CARDLIST.tuner
@@ -64,8 +64,10 @@ tuner=62 - Philips TEA5767HN FM Radio
64tuner=63 - Philips FMD1216ME MK3 Hybrid Tuner 64tuner=63 - Philips FMD1216ME MK3 Hybrid Tuner
65tuner=64 - LG TDVS-H062F/TUA6034 65tuner=64 - LG TDVS-H062F/TUA6034
66tuner=65 - Ymec TVF66T5-B/DFF 66tuner=65 - Ymec TVF66T5-B/DFF
67tuner=66 - LG NTSC (TALN mini series) 67tuner=66 - LG TALN series
68tuner=67 - Philips TD1316 Hybrid Tuner 68tuner=67 - Philips TD1316 Hybrid Tuner
69tuner=68 - Philips TUV1236D ATSC/NTSC dual in 69tuner=68 - Philips TUV1236D ATSC/NTSC dual in
70tuner=69 - Tena TNF 5335 MF 70tuner=69 - Tena TNF 5335 and similar models
71tuner=70 - Samsung TCPN 2121P30A 71tuner=70 - Samsung TCPN 2121P30A
72tuner=71 - Xceive xc3028
73tuner=72 - Thomson FE6600
diff --git a/Documentation/video4linux/README.cpia2 b/Documentation/video4linux/README.cpia2
new file mode 100644
index 000000000000..ce8213d28b67
--- /dev/null
+++ b/Documentation/video4linux/README.cpia2
@@ -0,0 +1,130 @@
1$Id: README,v 1.7 2005/08/29 23:39:57 sbertin Exp $
2
31. Introduction
4
5 This is a driver for STMicroelectronics's CPiA2 (second generation
6Colour Processor Interface ASIC) based cameras. This camera outputs an MJPEG
7stream at up to vga size. It implements the Video4Linux interface as much as
8possible. Since the V4L interface does not support compressed formats, only
9an mjpeg enabled application can be used with the camera. We have modified the
10gqcam application to view this stream.
11
12 The driver is implemented as two kernel modules. The cpia2 module
13contains the camera functions and the V4L interface. The cpia2_usb module
 14contains usb specific functions. The main reason for this was that the size of
 15the module was getting out of hand, so I separated them. It is not likely that
16there will be a parallel port version.
17
18FEATURES:
19 - Supports cameras with the Vision stv6410 (CIF) and stv6500 (VGA) cmos
20 sensors. I only have the vga sensor, so can't test the other.
21 - Image formats: VGA, QVGA, CIF, QCIF, and a number of sizes in between.
22 VGA and QVGA are the native image sizes for the VGA camera. CIF is done
23 in the coprocessor by scaling QVGA. All other sizes are done by clipping.
24 - Palette: YCrCb, compressed with MJPEG.
25 - Some compression parameters are settable.
26 - Sensor framerate is adjustable (up to 30 fps CIF, 15 fps VGA).
27 - Adjust brightness, color, contrast while streaming.
28 - Flicker control settable for 50 or 60 Hz mains frequency.
29
302. Making and installing the stv672 driver modules:
31
32 Requirements:
33 -------------
34 This should work with 2.4 (2.4.23 and later) and 2.6 kernels, but has
35only been tested on 2.6. Video4Linux must be either compiled into the kernel or
36available as a module. Video4Linux2 is automatically detected and made
37available at compile time.
38
39 Compiling:
40 ----------
41 As root, do a make install. This will compile and install the modules
42into the media/video directory in the module tree. For 2.4 kernels, use
 43Makefile_2.4 (i.e. do make -f Makefile_2.4 install).
44
45 Setup:
46 ------
47 Use 'modprobe cpia2' to load and 'modprobe -r cpia2' to unload. This
48may be done automatically by your distribution.
49
503. Driver options
51
52 Option Description
53 ------ -----------
54 video_nr video device to register (0=/dev/video0, etc)
55 range -1 to 64. default is -1 (first available)
56 If you have more than 1 camera, this MUST be -1.
57 buffer_size Size for each frame buffer in bytes (default 68k)
58 num_buffers Number of frame buffers (1-32, default 3)
59 alternate USB Alternate (2-7, default 7)
 60 flicker_freq Frequency for flicker reduction (50 or 60, default 60)
61 flicker_mode 0 to disable, or 1 to enable flicker reduction.
62 (default 0). This is only effective if the camera
63 uses a stv0672 coprocessor.
64
65 Setting the options:
66 --------------------
67 If you are using modules, edit /etc/modules.conf and add an options
68line like this:
69 options cpia2 num_buffers=3 buffer_size=65535
70
71 If the driver is compiled into the kernel, at boot time specify them
72like this:
73 cpia2.num_buffers=3 cpia2.buffer_size=65535
74
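For example, a plausible invocation that loads the driver with flicker
reduction enabled for 50 Hz mains (using only the parameters documented
in the table above; the values are illustrative, not recommendations):

	modprobe cpia2 flicker_mode=1 flicker_freq=50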
75 What buffer size should I use?
76 ------------------------------
77 The maximum image size depends on the alternate you choose, and the
78frame rate achieved by the camera. If the compression engine is able to
79keep up with the frame rate, the maximum image size is given by the table
80below.
81 The compression engine starts out at maximum compression, and will
82increase image quality until it is close to the size in the table. As long
83as the compression engine can keep up with the frame rate, after a short time
84the images will all be about the size in the table, regardless of resolution.
85 At low alternate settings, the compression engine may not be able to
86compress the image enough and will reduce the frame rate by producing larger
87images.
88 The default of 68k should be good for most users. This will handle
89any alternate at frame rates down to 15fps. For lower frame rates, it may
90be necessary to increase the buffer size to avoid having frames dropped due
91to insufficient space.
92
93 Image size(bytes)
94 Alternate bytes/ms 15fps 30fps
95 2 128 8533 4267
96 3 384 25600 12800
97 4 640 42667 21333
98 5 768 51200 25600
99 6 896 59733 29867
100 7 1023 68200 34100
101
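The table entries are just the per-millisecond bandwidth of the chosen
alternate multiplied by the frame period. As a rough sketch of the
arithmetic (shell, assuming alternate 7 at 15 fps):

	# 1023 bytes/ms * (1000 ms / 15 frames) ~= 68200 bytes per frame
	echo $(( 1023 * 1000 / 15 ))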
102 How many buffers should I use?
103 ------------------------------
104 For normal streaming, 3 should give the best results. With only 2,
105it is possible for the camera to finish sending one image just after a
106program has started reading the other. If this happens, the driver must drop
107a frame. The exception to this is if you have a heavily loaded machine. In
108this case use 2 buffers. You are probably not reading at the full frame rate.
109If the camera can send multiple images before a read finishes, it could
110overwrite the third buffer before the read finishes, leading to a corrupt
111image. Single and double buffering have extra checks to avoid overwriting.
112
1134. Using the camera
114
115 We are providing a modified gqcam application to view the output. In
116order to avoid confusion, here it is called mview. There is also the qx5view
117program which can also control the lights on the qx5 microscope. MJPEG Tools
118(http://mjpeg.sourceforge.net) can also be used to record from the camera.
119
1205. Notes to developers:
121
122 - This is a driver version stripped of the 2.4 back compatibility
123 and old MJPEG ioctl API. See cpia2.sf.net for 2.4 support.
124
1256. Thanks:
126
127 - Peter Pregler <Peter_Pregler@email.com>,
128 Scott J. Bertin <scottbertin@yahoo.com>, and
129 Jarl Totland <Jarl.Totland@bdc.no> for the original cpia driver, which
130 this one was modelled from.
diff --git a/Documentation/video4linux/cpia2_overview.txt b/Documentation/video4linux/cpia2_overview.txt
new file mode 100644
index 000000000000..a6e53665216b
--- /dev/null
+++ b/Documentation/video4linux/cpia2_overview.txt
@@ -0,0 +1,38 @@
1 Programmer's View of Cpia2
2
3Cpia2 is the second generation video coprocessor from VLSI Vision Ltd (now a
4division of ST Microelectronics). There are two versions. The first is the
5STV0672, which is capable of up to 30 frames per second (fps) in frame sizes
6up to CIF, and 15 fps for VGA frames. The STV0676 is an improved version,
7which can handle up to 30 fps VGA. Both coprocessors can be attached to two
8CMOS sensors - the vvl6410 CIF sensor and the vvl6500 VGA sensor. These will
9be referred to as the 410 and the 500 sensors, or the CIF and VGA sensors.
10
11The two chipsets operate almost identically. The core is an 8051 processor,
12running two different versions of firmware. The 672 runs the VP4 video
13processor code, the 676 runs VP5. There are a few differences in register
14mappings for the two chips. In these cases, the symbols defined in the
15header files are marked with VP4 or VP5 as part of the symbol name.
16
17The cameras appear externally as three sets of registers. Setting register
18values is the only way to control the camera. Some settings are
 19interdependent, such as the sequence required to power up the camera. I will
20try to make note of all of these cases.
21
22The register sets are called blocks. Block 0 is the system block. This
23section is always powered on when the camera is plugged in. It contains
24registers that control housekeeping functions such as powering up the video
25processor. The video processor is the VP block. These registers control
26how the video from the sensor is processed. Examples are timing registers,
27user mode (vga, qvga), scaling, cropping, framerates, and so on. The last
28block is the video compressor (VC). The video stream sent from the camera is
29compressed as Motion JPEG (JPEGA). The VC controls all of the compression
30parameters. Looking at the file cpia2_registers.h, you can get a full view
31of these registers and the possible values for most of them.
32
33One or more registers can be set or read by sending a usb control message to
34the camera. There are three modes for this. Block mode requests a number
35of contiguous registers. Random mode reads or writes random registers with
36a tuple structure containing address/value pairs. The repeat mode is only
37used by VP4 to load a firmware patch. It contains a starting address and
38a sequence of bytes to be written into a gpio port. \ No newline at end of file
diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c
index 4303d988c4bf..d13270c5d7cd 100644
--- a/arch/arm/mach-realview/core.c
+++ b/arch/arm/mach-realview/core.c
@@ -202,11 +202,6 @@ struct clk realview_clcd_clk = {
202/* 202/*
203 * CLCD support. 203 * CLCD support.
204 */ 204 */
205#define SYS_CLCD_MODE_MASK (3 << 0)
206#define SYS_CLCD_MODE_888 (0 << 0)
207#define SYS_CLCD_MODE_5551 (1 << 0)
208#define SYS_CLCD_MODE_565_RLSB (2 << 0)
209#define SYS_CLCD_MODE_565_BLSB (3 << 0)
210#define SYS_CLCD_NLCDIOON (1 << 2) 205#define SYS_CLCD_NLCDIOON (1 << 2)
211#define SYS_CLCD_VDDPOSSWITCH (1 << 3) 206#define SYS_CLCD_VDDPOSSWITCH (1 << 3)
212#define SYS_CLCD_PWR3V5SWITCH (1 << 4) 207#define SYS_CLCD_PWR3V5SWITCH (1 << 4)
@@ -360,29 +355,10 @@ static void realview_clcd_enable(struct clcd_fb *fb)
360 void __iomem *sys_clcd = __io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_CLCD_OFFSET; 355 void __iomem *sys_clcd = __io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_CLCD_OFFSET;
361 u32 val; 356 u32 val;
362 357
363 val = readl(sys_clcd);
364 val &= ~SYS_CLCD_MODE_MASK;
365
366 switch (fb->fb.var.green.length) {
367 case 5:
368 val |= SYS_CLCD_MODE_5551;
369 break;
370 case 6:
371 val |= SYS_CLCD_MODE_565_RLSB;
372 break;
373 case 8:
374 val |= SYS_CLCD_MODE_888;
375 break;
376 }
377
378 /*
379 * Set the MUX
380 */
381 writel(val, sys_clcd);
382
383 /* 358 /*
384 * And now enable the PSUs 359 * Enable the PSUs
385 */ 360 */
361 val = readl(sys_clcd);
386 val |= SYS_CLCD_NLCDIOON | SYS_CLCD_PWR3V5SWITCH; 362 val |= SYS_CLCD_NLCDIOON | SYS_CLCD_PWR3V5SWITCH;
387 writel(val, sys_clcd); 363 writel(val, sys_clcd);
388} 364}
diff --git a/arch/ppc/platforms/hdpu.c b/arch/ppc/platforms/hdpu.c
index 50039a204c24..f945416960e9 100644
--- a/arch/ppc/platforms/hdpu.c
+++ b/arch/ppc/platforms/hdpu.c
@@ -319,11 +319,10 @@ static void __init hdpu_fixup_eth_pdata(struct platform_device *pd)
319 struct mv643xx_eth_platform_data *eth_pd; 319 struct mv643xx_eth_platform_data *eth_pd;
320 eth_pd = pd->dev.platform_data; 320 eth_pd = pd->dev.platform_data;
321 321
322 eth_pd->port_serial_control =
323 mv64x60_read(&bh, MV643XX_ETH_PORT_SERIAL_CONTROL_REG(pd->id) & ~1);
324
325 eth_pd->force_phy_addr = 1; 322 eth_pd->force_phy_addr = 1;
326 eth_pd->phy_addr = pd->id; 323 eth_pd->phy_addr = pd->id;
324 eth_pd->speed = SPEED_100;
325 eth_pd->duplex = DUPLEX_FULL;
327 eth_pd->tx_queue_size = 400; 326 eth_pd->tx_queue_size = 400;
328 eth_pd->rx_queue_size = 800; 327 eth_pd->rx_queue_size = 800;
329} 328}
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index d39c9f206271..460f72e640e6 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -217,7 +217,7 @@ static void _sparc_free_io(struct resource *res)
217 unsigned long plen; 217 unsigned long plen;
218 218
219 plen = res->end - res->start + 1; 219 plen = res->end - res->start + 1;
220 if ((plen & (PAGE_SIZE-1)) != 0) BUG(); 220 BUG_ON((plen & (PAGE_SIZE-1)) != 0);
221 sparc_unmapiorange(res->start, plen); 221 sparc_unmapiorange(res->start, plen);
222 release_resource(res); 222 release_resource(res);
223} 223}
@@ -512,8 +512,7 @@ void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
512dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, 512dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
513 int direction) 513 int direction)
514{ 514{
515 if (direction == PCI_DMA_NONE) 515 BUG_ON(direction == PCI_DMA_NONE);
516 BUG();
517 /* IIep is write-through, not flushing. */ 516 /* IIep is write-through, not flushing. */
518 return virt_to_phys(ptr); 517 return virt_to_phys(ptr);
519} 518}
@@ -528,8 +527,7 @@ dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
528void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size, 527void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
529 int direction) 528 int direction)
530{ 529{
531 if (direction == PCI_DMA_NONE) 530 BUG_ON(direction == PCI_DMA_NONE);
532 BUG();
533 if (direction != PCI_DMA_TODEVICE) { 531 if (direction != PCI_DMA_TODEVICE) {
534 mmu_inval_dma_area((unsigned long)phys_to_virt(ba), 532 mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
535 (size + PAGE_SIZE-1) & PAGE_MASK); 533 (size + PAGE_SIZE-1) & PAGE_MASK);
@@ -542,8 +540,7 @@ void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
542dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page, 540dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
543 unsigned long offset, size_t size, int direction) 541 unsigned long offset, size_t size, int direction)
544{ 542{
545 if (direction == PCI_DMA_NONE) 543 BUG_ON(direction == PCI_DMA_NONE);
546 BUG();
547 /* IIep is write-through, not flushing. */ 544 /* IIep is write-through, not flushing. */
548 return page_to_phys(page) + offset; 545 return page_to_phys(page) + offset;
549} 546}
@@ -551,8 +548,7 @@ dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
551void pci_unmap_page(struct pci_dev *hwdev, 548void pci_unmap_page(struct pci_dev *hwdev,
552 dma_addr_t dma_address, size_t size, int direction) 549 dma_addr_t dma_address, size_t size, int direction)
553{ 550{
554 if (direction == PCI_DMA_NONE) 551 BUG_ON(direction == PCI_DMA_NONE);
555 BUG();
556 /* mmu_inval_dma_area XXX */ 552 /* mmu_inval_dma_area XXX */
557} 553}
558 554
@@ -576,11 +572,10 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
576{ 572{
577 int n; 573 int n;
578 574
579 if (direction == PCI_DMA_NONE) 575 BUG_ON(direction == PCI_DMA_NONE);
580 BUG();
581 /* IIep is write-through, not flushing. */ 576 /* IIep is write-through, not flushing. */
582 for (n = 0; n < nents; n++) { 577 for (n = 0; n < nents; n++) {
583 if (page_address(sg->page) == NULL) BUG(); 578 BUG_ON(page_address(sg->page) == NULL);
584 sg->dvma_address = virt_to_phys(page_address(sg->page)); 579 sg->dvma_address = virt_to_phys(page_address(sg->page));
585 sg->dvma_length = sg->length; 580 sg->dvma_length = sg->length;
586 sg++; 581 sg++;
@@ -597,11 +592,10 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
597{ 592{
598 int n; 593 int n;
599 594
600 if (direction == PCI_DMA_NONE) 595 BUG_ON(direction == PCI_DMA_NONE);
601 BUG();
602 if (direction != PCI_DMA_TODEVICE) { 596 if (direction != PCI_DMA_TODEVICE) {
603 for (n = 0; n < nents; n++) { 597 for (n = 0; n < nents; n++) {
604 if (page_address(sg->page) == NULL) BUG(); 598 BUG_ON(page_address(sg->page) == NULL);
605 mmu_inval_dma_area( 599 mmu_inval_dma_area(
606 (unsigned long) page_address(sg->page), 600 (unsigned long) page_address(sg->page),
607 (sg->length + PAGE_SIZE-1) & PAGE_MASK); 601 (sg->length + PAGE_SIZE-1) & PAGE_MASK);
@@ -622,8 +616,7 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
622 */ 616 */
623void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction) 617void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
624{ 618{
625 if (direction == PCI_DMA_NONE) 619 BUG_ON(direction == PCI_DMA_NONE);
626 BUG();
627 if (direction != PCI_DMA_TODEVICE) { 620 if (direction != PCI_DMA_TODEVICE) {
628 mmu_inval_dma_area((unsigned long)phys_to_virt(ba), 621 mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
629 (size + PAGE_SIZE-1) & PAGE_MASK); 622 (size + PAGE_SIZE-1) & PAGE_MASK);
@@ -632,8 +625,7 @@ void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t si
632 625
633void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction) 626void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
634{ 627{
635 if (direction == PCI_DMA_NONE) 628 BUG_ON(direction == PCI_DMA_NONE);
636 BUG();
637 if (direction != PCI_DMA_TODEVICE) { 629 if (direction != PCI_DMA_TODEVICE) {
638 mmu_inval_dma_area((unsigned long)phys_to_virt(ba), 630 mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
639 (size + PAGE_SIZE-1) & PAGE_MASK); 631 (size + PAGE_SIZE-1) & PAGE_MASK);
@@ -650,11 +642,10 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int
650{ 642{
651 int n; 643 int n;
652 644
653 if (direction == PCI_DMA_NONE) 645 BUG_ON(direction == PCI_DMA_NONE);
654 BUG();
655 if (direction != PCI_DMA_TODEVICE) { 646 if (direction != PCI_DMA_TODEVICE) {
656 for (n = 0; n < nents; n++) { 647 for (n = 0; n < nents; n++) {
657 if (page_address(sg->page) == NULL) BUG(); 648 BUG_ON(page_address(sg->page) == NULL);
658 mmu_inval_dma_area( 649 mmu_inval_dma_area(
659 (unsigned long) page_address(sg->page), 650 (unsigned long) page_address(sg->page),
660 (sg->length + PAGE_SIZE-1) & PAGE_MASK); 651 (sg->length + PAGE_SIZE-1) & PAGE_MASK);
@@ -667,11 +658,10 @@ void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, i
667{ 658{
668 int n; 659 int n;
669 660
670 if (direction == PCI_DMA_NONE) 661 BUG_ON(direction == PCI_DMA_NONE);
671 BUG();
672 if (direction != PCI_DMA_TODEVICE) { 662 if (direction != PCI_DMA_TODEVICE) {
673 for (n = 0; n < nents; n++) { 663 for (n = 0; n < nents; n++) {
674 if (page_address(sg->page) == NULL) BUG(); 664 BUG_ON(page_address(sg->page) == NULL);
675 mmu_inval_dma_area( 665 mmu_inval_dma_area(
676 (unsigned long) page_address(sg->page), 666 (unsigned long) page_address(sg->page),
677 (sg->length + PAGE_SIZE-1) & PAGE_MASK); 667 (sg->length + PAGE_SIZE-1) & PAGE_MASK);
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index 4c0a50a76554..c3685b314d71 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -186,6 +186,15 @@ endchoice
186 186
187endmenu 187endmenu
188 188
189config ARCH_SPARSEMEM_ENABLE
190 def_bool y
191
192config ARCH_SPARSEMEM_DEFAULT
193 def_bool y
194
195config LARGE_ALLOCS
196 def_bool y
197
189source "mm/Kconfig" 198source "mm/Kconfig"
190 199
191config GENERIC_ISA_DMA 200config GENERIC_ISA_DMA
@@ -350,6 +359,15 @@ config SOLARIS_EMUL
350 359
351endmenu 360endmenu
352 361
362config SCHED_SMT
363 bool "SMT (Hyperthreading) scheduler support"
364 depends on SMP
365 default y
366 help
367 SMT scheduler support improves the CPU scheduler's decision making
368 when dealing with UltraSPARC cpus at a cost of slightly increased
369 overhead in some places. If unsure say N here.
370
353config CMDLINE_BOOL 371config CMDLINE_BOOL
354 bool "Default bootloader kernel arguments" 372 bool "Default bootloader kernel arguments"
355 373
diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig
index 069d49777b2a..f819a9663a8d 100644
--- a/arch/sparc64/defconfig
+++ b/arch/sparc64/defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.16-rc2 3# Linux kernel version: 2.6.16
4# Tue Feb 7 17:47:18 2006 4# Mon Mar 20 01:23:21 2006
5# 5#
6CONFIG_SPARC=y 6CONFIG_SPARC=y
7CONFIG_SPARC64=y 7CONFIG_SPARC64=y
@@ -115,14 +115,20 @@ CONFIG_GENERIC_CALIBRATE_DELAY=y
115CONFIG_HUGETLB_PAGE_SIZE_4MB=y 115CONFIG_HUGETLB_PAGE_SIZE_4MB=y
116# CONFIG_HUGETLB_PAGE_SIZE_512K is not set 116# CONFIG_HUGETLB_PAGE_SIZE_512K is not set
117# CONFIG_HUGETLB_PAGE_SIZE_64K is not set 117# CONFIG_HUGETLB_PAGE_SIZE_64K is not set
118CONFIG_ARCH_SPARSEMEM_ENABLE=y
119CONFIG_ARCH_SPARSEMEM_DEFAULT=y
120CONFIG_LARGE_ALLOCS=y
118CONFIG_SELECT_MEMORY_MODEL=y 121CONFIG_SELECT_MEMORY_MODEL=y
119CONFIG_FLATMEM_MANUAL=y 122# CONFIG_FLATMEM_MANUAL is not set
120# CONFIG_DISCONTIGMEM_MANUAL is not set 123# CONFIG_DISCONTIGMEM_MANUAL is not set
121# CONFIG_SPARSEMEM_MANUAL is not set 124CONFIG_SPARSEMEM_MANUAL=y
122CONFIG_FLATMEM=y 125CONFIG_SPARSEMEM=y
123CONFIG_FLAT_NODE_MEM_MAP=y 126CONFIG_HAVE_MEMORY_PRESENT=y
124# CONFIG_SPARSEMEM_STATIC is not set 127# CONFIG_SPARSEMEM_STATIC is not set
128CONFIG_SPARSEMEM_EXTREME=y
129CONFIG_MEMORY_HOTPLUG=y
125CONFIG_SPLIT_PTLOCK_CPUS=4 130CONFIG_SPLIT_PTLOCK_CPUS=4
131CONFIG_MIGRATION=y
126CONFIG_GENERIC_ISA_DMA=y 132CONFIG_GENERIC_ISA_DMA=y
127CONFIG_SBUS=y 133CONFIG_SBUS=y
128CONFIG_SBUSCHAR=y 134CONFIG_SBUSCHAR=y
@@ -655,6 +661,7 @@ CONFIG_SERIAL_SUNCORE=y
655CONFIG_SERIAL_SUNSU=y 661CONFIG_SERIAL_SUNSU=y
656CONFIG_SERIAL_SUNSU_CONSOLE=y 662CONFIG_SERIAL_SUNSU_CONSOLE=y
657CONFIG_SERIAL_SUNSAB=m 663CONFIG_SERIAL_SUNSAB=m
664CONFIG_SERIAL_SUNHV=y
658CONFIG_SERIAL_CORE=y 665CONFIG_SERIAL_CORE=y
659CONFIG_SERIAL_CORE_CONSOLE=y 666CONFIG_SERIAL_CORE_CONSOLE=y
660# CONFIG_SERIAL_JSM is not set 667# CONFIG_SERIAL_JSM is not set
@@ -1116,11 +1123,7 @@ CONFIG_USB_HIDDEV=y
1116# CONFIG_INFINIBAND is not set 1123# CONFIG_INFINIBAND is not set
1117 1124
1118# 1125#
1119# SN Devices 1126# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
1120#
1121
1122#
1123# EDAC - error detection and reporting (RAS)
1124# 1127#
1125 1128
1126# 1129#
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile
index 83d67eb18895..6f6816488b04 100644
--- a/arch/sparc64/kernel/Makefile
+++ b/arch/sparc64/kernel/Makefile
@@ -11,10 +11,12 @@ obj-y := process.o setup.o cpu.o idprom.o \
11 traps.o devices.o auxio.o una_asm.o \ 11 traps.o devices.o auxio.o una_asm.o \
12 irq.o ptrace.o time.o sys_sparc.o signal.o \ 12 irq.o ptrace.o time.o sys_sparc.o signal.o \
13 unaligned.o central.o pci.o starfire.o semaphore.o \ 13 unaligned.o central.o pci.o starfire.o semaphore.o \
14 power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o 14 power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o \
15 visemul.o
15 16
16obj-$(CONFIG_PCI) += ebus.o isa.o pci_common.o pci_iommu.o \ 17obj-$(CONFIG_PCI) += ebus.o isa.o pci_common.o pci_iommu.o \
17 pci_psycho.o pci_sabre.o pci_schizo.o 18 pci_psycho.o pci_sabre.o pci_schizo.o \
19 pci_sun4v.o pci_sun4v_asm.o
18obj-$(CONFIG_SMP) += smp.o trampoline.o 20obj-$(CONFIG_SMP) += smp.o trampoline.o
19obj-$(CONFIG_SPARC32_COMPAT) += sys32.o sys_sparc32.o signal32.o 21obj-$(CONFIG_SPARC32_COMPAT) += sys32.o sys_sparc32.o signal32.o
20obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o 22obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o
@@ -38,5 +40,5 @@ else
38 CMODEL_CFLAG := -m64 -mcmodel=medlow 40 CMODEL_CFLAG := -m64 -mcmodel=medlow
39endif 41endif
40 42
41head.o: head.S ttable.S itlb_base.S dtlb_base.S dtlb_backend.S dtlb_prot.S \ 43head.o: head.S ttable.S itlb_miss.S dtlb_miss.S ktlb.S tsb.S \
42 etrap.S rtrap.S winfixup.S entry.S 44 etrap.S rtrap.S winfixup.S entry.S
diff --git a/arch/sparc64/kernel/binfmt_aout32.c b/arch/sparc64/kernel/binfmt_aout32.c
index 202a80c24b6f..d7caa60a0074 100644
--- a/arch/sparc64/kernel/binfmt_aout32.c
+++ b/arch/sparc64/kernel/binfmt_aout32.c
@@ -31,6 +31,7 @@
31#include <asm/system.h> 31#include <asm/system.h>
32#include <asm/uaccess.h> 32#include <asm/uaccess.h>
33#include <asm/pgalloc.h> 33#include <asm/pgalloc.h>
34#include <asm/mmu_context.h>
34 35
35static int load_aout32_binary(struct linux_binprm *, struct pt_regs * regs); 36static int load_aout32_binary(struct linux_binprm *, struct pt_regs * regs);
36static int load_aout32_library(struct file*); 37static int load_aout32_library(struct file*);
@@ -238,6 +239,8 @@ static int load_aout32_binary(struct linux_binprm * bprm, struct pt_regs * regs)
238 (current->mm->start_data = N_DATADDR(ex)); 239 (current->mm->start_data = N_DATADDR(ex));
239 current->mm->brk = ex.a_bss + 240 current->mm->brk = ex.a_bss +
240 (current->mm->start_brk = N_BSSADDR(ex)); 241 (current->mm->start_brk = N_BSSADDR(ex));
242 current->mm->free_area_cache = current->mm->mmap_base;
243 current->mm->cached_hole_size = 0;
241 244
242 current->mm->mmap = NULL; 245 current->mm->mmap = NULL;
243 compute_creds(bprm); 246 compute_creds(bprm);
@@ -329,15 +332,8 @@ beyond_if:
329 332
330 current->mm->start_stack = 333 current->mm->start_stack =
331 (unsigned long) create_aout32_tables((char __user *)bprm->p, bprm); 334 (unsigned long) create_aout32_tables((char __user *)bprm->p, bprm);
332 if (!(orig_thr_flags & _TIF_32BIT)) { 335 tsb_context_switch(current->mm);
333 unsigned long pgd_cache = get_pgd_cache(current->mm->pgd); 336
334
335 __asm__ __volatile__("stxa\t%0, [%1] %2\n\t"
336 "membar #Sync"
337 : /* no outputs */
338 : "r" (pgd_cache),
339 "r" (TSB_REG), "i" (ASI_DMMU));
340 }
341 start_thread32(regs, ex.a_entry, current->mm->start_stack); 337 start_thread32(regs, ex.a_entry, current->mm->start_stack);
342 if (current->ptrace & PT_PTRACED) 338 if (current->ptrace & PT_PTRACED)
343 send_sig(SIGTRAP, current, 0); 339 send_sig(SIGTRAP, current, 0);
diff --git a/arch/sparc64/kernel/binfmt_elf32.c b/arch/sparc64/kernel/binfmt_elf32.c
index a1a12d2aa353..8a2abcce2737 100644
--- a/arch/sparc64/kernel/binfmt_elf32.c
+++ b/arch/sparc64/kernel/binfmt_elf32.c
@@ -153,7 +153,9 @@ MODULE_AUTHOR("Eric Youngdale, David S. Miller, Jakub Jelinek");
153#undef MODULE_DESCRIPTION 153#undef MODULE_DESCRIPTION
154#undef MODULE_AUTHOR 154#undef MODULE_AUTHOR
155 155
156#include <asm/a.out.h>
157
156#undef TASK_SIZE 158#undef TASK_SIZE
157#define TASK_SIZE 0xf0000000 159#define TASK_SIZE STACK_TOP32
158 160
159#include "../../../fs/binfmt_elf.c" 161#include "../../../fs/binfmt_elf.c"
diff --git a/arch/sparc64/kernel/cpu.c b/arch/sparc64/kernel/cpu.c
index 00eed88ef2e8..11cc0caef592 100644
--- a/arch/sparc64/kernel/cpu.c
+++ b/arch/sparc64/kernel/cpu.c
@@ -13,6 +13,7 @@
13#include <asm/system.h> 13#include <asm/system.h>
14#include <asm/fpumacro.h> 14#include <asm/fpumacro.h>
15#include <asm/cpudata.h> 15#include <asm/cpudata.h>
16#include <asm/spitfire.h>
16 17
17DEFINE_PER_CPU(cpuinfo_sparc, __cpu_data) = { 0 }; 18DEFINE_PER_CPU(cpuinfo_sparc, __cpu_data) = { 0 };
18 19
@@ -71,6 +72,12 @@ void __init cpu_probe(void)
71 unsigned long ver, fpu_vers, manuf, impl, fprs; 72 unsigned long ver, fpu_vers, manuf, impl, fprs;
72 int i; 73 int i;
73 74
75 if (tlb_type == hypervisor) {
76 sparc_cpu_type = "UltraSparc T1 (Niagara)";
77 sparc_fpu_type = "UltraSparc T1 integrated FPU";
78 return;
79 }
80
74 fprs = fprs_read(); 81 fprs = fprs_read();
75 fprs_write(FPRS_FEF); 82 fprs_write(FPRS_FEF);
76 __asm__ __volatile__ ("rdpr %%ver, %0; stx %%fsr, [%1]" 83 __asm__ __volatile__ ("rdpr %%ver, %0; stx %%fsr, [%1]"
diff --git a/arch/sparc64/kernel/devices.c b/arch/sparc64/kernel/devices.c
index df9a1ca8fd77..007e8922cd16 100644
--- a/arch/sparc64/kernel/devices.c
+++ b/arch/sparc64/kernel/devices.c
@@ -12,6 +12,7 @@
12#include <linux/string.h> 12#include <linux/string.h>
13#include <linux/spinlock.h> 13#include <linux/spinlock.h>
14#include <linux/errno.h> 14#include <linux/errno.h>
15#include <linux/bootmem.h>
15 16
16#include <asm/page.h> 17#include <asm/page.h>
17#include <asm/oplib.h> 18#include <asm/oplib.h>
@@ -20,6 +21,8 @@
20#include <asm/spitfire.h> 21#include <asm/spitfire.h>
21#include <asm/timer.h> 22#include <asm/timer.h>
22#include <asm/cpudata.h> 23#include <asm/cpudata.h>
24#include <asm/vdev.h>
25#include <asm/irq.h>
23 26
24/* Used to synchronize acceses to NatSemi SUPER I/O chip configure 27/* Used to synchronize acceses to NatSemi SUPER I/O chip configure
25 * operations in asm/ns87303.h 28 * operations in asm/ns87303.h
@@ -29,13 +32,158 @@ DEFINE_SPINLOCK(ns87303_lock);
29extern void cpu_probe(void); 32extern void cpu_probe(void);
30extern void central_probe(void); 33extern void central_probe(void);
31 34
32static char *cpu_mid_prop(void) 35u32 sun4v_vdev_devhandle;
36int sun4v_vdev_root;
37
38struct vdev_intmap {
39 unsigned int phys;
40 unsigned int irq;
41 unsigned int cnode;
42 unsigned int cinterrupt;
43};
44
45struct vdev_intmask {
46 unsigned int phys;
47 unsigned int interrupt;
48 unsigned int __unused;
49};
50
51static struct vdev_intmap *vdev_intmap;
52static int vdev_num_intmap;
53static struct vdev_intmask vdev_intmask;
54
55static void __init sun4v_virtual_device_probe(void)
56{
57 struct linux_prom64_registers regs;
58 struct vdev_intmap *ip;
59 int node, sz, err;
60
61 if (tlb_type != hypervisor)
62 return;
63
64 node = prom_getchild(prom_root_node);
65 node = prom_searchsiblings(node, "virtual-devices");
66 if (!node) {
67 prom_printf("SUN4V: Fatal error, no virtual-devices node.\n");
68 prom_halt();
69 }
70
71 sun4v_vdev_root = node;
72
73 prom_getproperty(node, "reg", (char *)&regs, sizeof(regs));
74 sun4v_vdev_devhandle = (regs.phys_addr >> 32UL) & 0x0fffffff;
75
76 sz = prom_getproplen(node, "interrupt-map");
77 if (sz <= 0) {
78 prom_printf("SUN4V: Error, no vdev interrupt-map.\n");
79 prom_halt();
80 }
81
82 if ((sz % sizeof(*ip)) != 0) {
83 prom_printf("SUN4V: Bogus interrupt-map property size %d\n",
84 sz);
85 prom_halt();
86 }
87
88 vdev_intmap = ip = alloc_bootmem_low_pages(sz);
89 if (!vdev_intmap) {
90 prom_printf("SUN4V: Error, cannot allocate vdev_intmap.\n");
91 prom_halt();
92 }
93
94 err = prom_getproperty(node, "interrupt-map", (char *) ip, sz);
95 if (err == -1) {
96 prom_printf("SUN4V: Fatal error, no vdev interrupt-map.\n");
97 prom_halt();
98 }
99 if (err != sz) {
100 prom_printf("SUN4V: Inconsistent interrupt-map size, "
101 "proplen(%d) vs getprop(%d).\n", sz,err);
102 prom_halt();
103 }
104
105 vdev_num_intmap = err / sizeof(*ip);
106
107 err = prom_getproperty(node, "interrupt-map-mask",
108 (char *) &vdev_intmask,
109 sizeof(vdev_intmask));
110 if (err <= 0) {
111 prom_printf("SUN4V: Fatal error, no vdev "
112 "interrupt-map-mask.\n");
113 prom_halt();
114 }
115 if (err % sizeof(vdev_intmask)) {
116 prom_printf("SUN4V: Bogus interrupt-map-mask "
117 "property size %d\n", err);
118 prom_halt();
119 }
120
121 printk("SUN4V: virtual-devices devhandle[%x]\n",
122 sun4v_vdev_devhandle);
123}
124
125unsigned int sun4v_vdev_device_interrupt(unsigned int dev_node)
126{
127 unsigned int irq, reg;
128 int err, i;
129
130 err = prom_getproperty(dev_node, "interrupts",
131 (char *) &irq, sizeof(irq));
132 if (err <= 0) {
133 printk("VDEV: Cannot get \"interrupts\" "
134 "property for OBP node %x\n", dev_node);
135 return 0;
136 }
137
138 err = prom_getproperty(dev_node, "reg",
139 (char *) &reg, sizeof(reg));
140 if (err <= 0) {
141 printk("VDEV: Cannot get \"reg\" "
142 "property for OBP node %x\n", dev_node);
143 return 0;
144 }
145
146 for (i = 0; i < vdev_num_intmap; i++) {
147 if (vdev_intmap[i].phys == (reg & vdev_intmask.phys) &&
148 vdev_intmap[i].irq == (irq & vdev_intmask.interrupt)) {
149 irq = vdev_intmap[i].cinterrupt;
150 break;
151 }
152 }
153
154 if (i == vdev_num_intmap) {
155 printk("VDEV: No matching interrupt map entry "
156 "for OBP node %x\n", dev_node);
157 return 0;
158 }
159
160 return sun4v_build_irq(sun4v_vdev_devhandle, irq, 5, 0);
161}
162
163static const char *cpu_mid_prop(void)
33{ 164{
34 if (tlb_type == spitfire) 165 if (tlb_type == spitfire)
35 return "upa-portid"; 166 return "upa-portid";
36 return "portid"; 167 return "portid";
37} 168}
38 169
170static int get_cpu_mid(int prom_node)
171{
172 if (tlb_type == hypervisor) {
173 struct linux_prom64_registers reg;
174
175 if (prom_getproplen(prom_node, "cpuid") == 4)
176 return prom_getintdefault(prom_node, "cpuid", 0);
177
178 prom_getproperty(prom_node, "reg", (char *) &reg, sizeof(reg));
179 return (reg.phys_addr >> 32) & 0x0fffffffUL;
180 } else {
181 const char *prop_name = cpu_mid_prop();
182
183 return prom_getintdefault(prom_node, prop_name, 0);
184 }
185}
186
39static int check_cpu_node(int nd, int *cur_inst, 187static int check_cpu_node(int nd, int *cur_inst,
40 int (*compare)(int, int, void *), void *compare_arg, 188 int (*compare)(int, int, void *), void *compare_arg,
41 int *prom_node, int *mid) 189 int *prom_node, int *mid)
@@ -50,7 +198,7 @@ static int check_cpu_node(int nd, int *cur_inst,
50 if (prom_node) 198 if (prom_node)
51 *prom_node = nd; 199 *prom_node = nd;
52 if (mid) 200 if (mid)
53 *mid = prom_getintdefault(nd, cpu_mid_prop(), 0); 201 *mid = get_cpu_mid(nd);
54 return 0; 202 return 0;
55 } 203 }
56 204
@@ -105,7 +253,7 @@ static int cpu_mid_compare(int nd, int instance, void *_arg)
105 int desired_mid = (int) (long) _arg; 253 int desired_mid = (int) (long) _arg;
106 int this_mid; 254 int this_mid;
107 255
108 this_mid = prom_getintdefault(nd, cpu_mid_prop(), 0); 256 this_mid = get_cpu_mid(nd);
109 if (this_mid == desired_mid) 257 if (this_mid == desired_mid)
110 return 0; 258 return 0;
111 return -ENODEV; 259 return -ENODEV;
@@ -126,7 +274,8 @@ void __init device_scan(void)
126 274
127#ifndef CONFIG_SMP 275#ifndef CONFIG_SMP
128 { 276 {
129 int err, cpu_node; 277 int err, cpu_node, def;
278
130 err = cpu_find_by_instance(0, &cpu_node, NULL); 279 err = cpu_find_by_instance(0, &cpu_node, NULL);
131 if (err) { 280 if (err) {
132 prom_printf("No cpu nodes, cannot continue\n"); 281 prom_printf("No cpu nodes, cannot continue\n");
@@ -135,21 +284,40 @@ void __init device_scan(void)
135 cpu_data(0).clock_tick = prom_getintdefault(cpu_node, 284 cpu_data(0).clock_tick = prom_getintdefault(cpu_node,
136 "clock-frequency", 285 "clock-frequency",
137 0); 286 0);
287
288 def = ((tlb_type == hypervisor) ?
289 (8 * 1024) :
290 (16 * 1024));
138 cpu_data(0).dcache_size = prom_getintdefault(cpu_node, 291 cpu_data(0).dcache_size = prom_getintdefault(cpu_node,
139 "dcache-size", 292 "dcache-size",
140 16 * 1024); 293 def);
294
295 def = 32;
141 cpu_data(0).dcache_line_size = 296 cpu_data(0).dcache_line_size =
142 prom_getintdefault(cpu_node, "dcache-line-size", 32); 297 prom_getintdefault(cpu_node, "dcache-line-size",
298 def);
299
300 def = 16 * 1024;
143 cpu_data(0).icache_size = prom_getintdefault(cpu_node, 301 cpu_data(0).icache_size = prom_getintdefault(cpu_node,
144 "icache-size", 302 "icache-size",
145 16 * 1024); 303 def);
304
305 def = 32;
146 cpu_data(0).icache_line_size = 306 cpu_data(0).icache_line_size =
147 prom_getintdefault(cpu_node, "icache-line-size", 32); 307 prom_getintdefault(cpu_node, "icache-line-size",
308 def);
309
310 def = ((tlb_type == hypervisor) ?
311 (3 * 1024 * 1024) :
312 (4 * 1024 * 1024));
148 cpu_data(0).ecache_size = prom_getintdefault(cpu_node, 313 cpu_data(0).ecache_size = prom_getintdefault(cpu_node,
149 "ecache-size", 314 "ecache-size",
150 4 * 1024 * 1024); 315 def);
316
317 def = 64;
151 cpu_data(0).ecache_line_size = 318 cpu_data(0).ecache_line_size =
152 prom_getintdefault(cpu_node, "ecache-line-size", 64); 319 prom_getintdefault(cpu_node, "ecache-line-size",
320 def);
153 printk("CPU[0]: Caches " 321 printk("CPU[0]: Caches "
154 "D[sz(%d):line_sz(%d)] " 322 "D[sz(%d):line_sz(%d)] "
155 "I[sz(%d):line_sz(%d)] " 323 "I[sz(%d):line_sz(%d)] "
@@ -160,6 +328,7 @@ void __init device_scan(void)
160 } 328 }
161#endif 329#endif
162 330
331 sun4v_virtual_device_probe();
163 central_probe(); 332 central_probe();
164 333
165 cpu_probe(); 334 cpu_probe();
diff --git a/arch/sparc64/kernel/dtlb_backend.S b/arch/sparc64/kernel/dtlb_backend.S
deleted file mode 100644
index acc889a7f9c1..000000000000
--- a/arch/sparc64/kernel/dtlb_backend.S
+++ /dev/null
@@ -1,170 +0,0 @@
1/* $Id: dtlb_backend.S,v 1.16 2001/10/09 04:02:11 davem Exp $
2 * dtlb_backend.S: Back end to DTLB miss replacement strategy.
3 * This is included directly into the trap table.
4 *
5 * Copyright (C) 1996,1998 David S. Miller (davem@redhat.com)
6 * Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz)
7 */
8
9#include <asm/pgtable.h>
10#include <asm/mmu.h>
11
12#define VALID_SZ_BITS (_PAGE_VALID | _PAGE_SZBITS)
13
14#define VPTE_BITS (_PAGE_CP | _PAGE_CV | _PAGE_P )
15#define VPTE_SHIFT (PAGE_SHIFT - 3)
16
17/* Ways we can get here:
18 *
19 * 1) Nucleus loads and stores to/from PA-->VA direct mappings at tl>1.
20 * 2) Nucleus loads and stores to/from user/kernel window save areas.
21 * 3) VPTE misses from dtlb_base and itlb_base.
22 *
23 * We need to extract out the PMD and PGDIR indexes from the
24 * linear virtual page table access address. The PTE index
25 * is at the bottom, but we are not concerned with it. Bits
26 * 0 to 2 are clear since each PTE is 8 bytes in size. Each
27 * PMD and PGDIR entry are 4 bytes in size. Thus, this
28 * address looks something like:
29 *
30 * |---------------------------------------------------------------|
31 * | ... | PGDIR index | PMD index | PTE index | |
32 * |---------------------------------------------------------------|
33 * 63 F E D C B A 3 2 0 <- bit nr
34 *
35 * The variable bits above are defined as:
36 * A --> 3 + (PAGE_SHIFT - log2(8))
37 * --> 3 + (PAGE_SHIFT - 3) - 1
38 * (ie. this is "bit 3" + PAGE_SIZE - size of PTE entry in bits - 1)
39 * B --> A + 1
40 * C --> B + (PAGE_SHIFT - log2(4))
41 * --> B + (PAGE_SHIFT - 2) - 1
42 * (ie. this is "bit B" + PAGE_SIZE - size of PMD entry in bits - 1)
43 * D --> C + 1
44 * E --> D + (PAGE_SHIFT - log2(4))
45 * --> D + (PAGE_SHIFT - 2) - 1
46 * (ie. this is "bit D" + PAGE_SIZE - size of PGDIR entry in bits - 1)
47 * F --> E + 1
48 *
49 * (Note how "B" always evalutes to PAGE_SHIFT, all the other constants
50 * cancel out.)
51 *
52 * For 8K PAGE_SIZE (thus, PAGE_SHIFT of 13) the bit numbers are:
53 * A --> 12
54 * B --> 13
55 * C --> 23
56 * D --> 24
57 * E --> 34
58 * F --> 35
59 *
60 * For 64K PAGE_SIZE (thus, PAGE_SHIFT of 16) the bit numbers are:
61 * A --> 15
62 * B --> 16
63 * C --> 29
64 * D --> 30
65 * E --> 43
66 * F --> 44
67 *
68 * Because bits both above and below each PGDIR and PMD index need to
69 * be masked out, and the index can be as long as 14 bits (when using a
70 * 64K PAGE_SIZE, and thus a PAGE_SHIFT of 16), we need 3 instructions
71 * to extract each index out.
72 *
73 * Shifts do not pair very well on UltraSPARC-I, II, IIi, and IIe, so
74 * we try to avoid using them for the entire operation. We could setup
75 * a mask anywhere from bit 31 down to bit 10 using the sethi instruction.
76 *
77 * We need a mask covering bits B --> C and one covering D --> E.
78 * For 8K PAGE_SIZE these masks are 0x00ffe000 and 0x7ff000000.
79 * For 64K PAGE_SIZE these masks are 0x3fff0000 and 0xfffc0000000.
80 * The second in each set cannot be loaded with a single sethi
81 * instruction, because the upper bits are past bit 32. We would
82 * need to use a sethi + a shift.
83 *
84 * For the time being, we use 2 shifts and a simple "and" mask.
85 * We shift left to clear the bits above the index, we shift down
86 * to clear the bits below the index (sans the log2(4 or 8) bits)
87 * and a mask to clear the log2(4 or 8) bits. We need therefore
88 * define 4 shift counts, all of which are relative to PAGE_SHIFT.
89 *
90 * Although unsupportable for other reasons, this does mean that
91 * 512K and 4MB page sizes would be generaally supported by the
92 * kernel. (ELF binaries would break with > 64K PAGE_SIZE since
93 * the sections are only aligned that strongly).
94 *
95 * The operations performed for extraction are thus:
96 *
97 * ((X << FOO_SHIFT_LEFT) >> FOO_SHIFT_RIGHT) & ~0x3
98 *
99 */
100
101#define A (3 + (PAGE_SHIFT - 3) - 1)
102#define B (A + 1)
103#define C (B + (PAGE_SHIFT - 2) - 1)
104#define D (C + 1)
105#define E (D + (PAGE_SHIFT - 2) - 1)
106#define F (E + 1)
107
108#define PMD_SHIFT_LEFT (64 - D)
109#define PMD_SHIFT_RIGHT (64 - (D - B) - 2)
110#define PGDIR_SHIFT_LEFT (64 - F)
111#define PGDIR_SHIFT_RIGHT (64 - (F - D) - 2)
112#define LOW_MASK_BITS 0x3
113
114/* TLB1 ** ICACHE line 1: tl1 DTLB and quick VPTE miss */
115 ldxa [%g1 + %g1] ASI_DMMU, %g4 ! Get TAG_ACCESS
116 add %g3, %g3, %g5 ! Compute VPTE base
117 cmp %g4, %g5 ! VPTE miss?
118 bgeu,pt %xcc, 1f ! Continue here
119 andcc %g4, TAG_CONTEXT_BITS, %g5 ! tl0 miss Nucleus test
120 ba,a,pt %xcc, from_tl1_trap ! Fall to tl0 miss
1211: sllx %g6, VPTE_SHIFT, %g4 ! Position TAG_ACCESS
122 or %g4, %g5, %g4 ! Prepare TAG_ACCESS
123
124/* TLB1 ** ICACHE line 2: Quick VPTE miss */
125 mov TSB_REG, %g1 ! Grab TSB reg
126 ldxa [%g1] ASI_DMMU, %g5 ! Doing PGD caching?
127 sllx %g6, PMD_SHIFT_LEFT, %g1 ! Position PMD offset
128 be,pn %xcc, sparc64_vpte_nucleus ! Is it from Nucleus?
129 srlx %g1, PMD_SHIFT_RIGHT, %g1 ! Mask PMD offset bits
130 brnz,pt %g5, sparc64_vpte_continue ! Yep, go like smoke
131 andn %g1, LOW_MASK_BITS, %g1 ! Final PMD mask
132 sllx %g6, PGDIR_SHIFT_LEFT, %g5 ! Position PGD offset
133
134/* TLB1 ** ICACHE line 3: Quick VPTE miss */
135 srlx %g5, PGDIR_SHIFT_RIGHT, %g5 ! Mask PGD offset bits
136 andn %g5, LOW_MASK_BITS, %g5 ! Final PGD mask
137 lduwa [%g7 + %g5] ASI_PHYS_USE_EC, %g5! Load PGD
138 brz,pn %g5, vpte_noent ! Valid?
139sparc64_kpte_continue:
140 sllx %g5, 11, %g5 ! Shift into place
141sparc64_vpte_continue:
142 lduwa [%g5 + %g1] ASI_PHYS_USE_EC, %g5! Load PMD
143 sllx %g5, 11, %g5 ! Shift into place
144 brz,pn %g5, vpte_noent ! Valid?
145
146/* TLB1 ** ICACHE line 4: Quick VPTE miss */
147 mov (VALID_SZ_BITS >> 61), %g1 ! upper vpte into %g1
148 sllx %g1, 61, %g1 ! finish calc
149 or %g5, VPTE_BITS, %g5 ! Prepare VPTE data
150 or %g5, %g1, %g5 ! ...
151 mov TLB_SFSR, %g1 ! Restore %g1 value
152 stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Load VPTE into TLB
153 stxa %g4, [%g1 + %g1] ASI_DMMU ! Restore previous TAG_ACCESS
154 retry ! Load PTE once again
155
156#undef VALID_SZ_BITS
157#undef VPTE_SHIFT
158#undef VPTE_BITS
159#undef A
160#undef B
161#undef C
162#undef D
163#undef E
164#undef F
165#undef PMD_SHIFT_LEFT
166#undef PMD_SHIFT_RIGHT
167#undef PGDIR_SHIFT_LEFT
168#undef PGDIR_SHIFT_RIGHT
169#undef LOW_MASK_BITS
170
diff --git a/arch/sparc64/kernel/dtlb_base.S b/arch/sparc64/kernel/dtlb_base.S
deleted file mode 100644
index 6528786840c0..000000000000
--- a/arch/sparc64/kernel/dtlb_base.S
+++ /dev/null
@@ -1,109 +0,0 @@
1/* $Id: dtlb_base.S,v 1.17 2001/10/11 22:33:52 davem Exp $
2 * dtlb_base.S: Front end to DTLB miss replacement strategy.
3 * This is included directly into the trap table.
4 *
5 * Copyright (C) 1996,1998 David S. Miller (davem@redhat.com)
6 * Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz)
7 */
8
9#include <asm/pgtable.h>
10#include <asm/mmu.h>
11
12/* %g1 TLB_SFSR (%g1 + %g1 == TLB_TAG_ACCESS)
13 * %g2 (KERN_HIGHBITS | KERN_LOWBITS)
14 * %g3 VPTE base (0xfffffffe00000000) Spitfire/Blackbird (44-bit VA space)
15 * (0xffe0000000000000) Cheetah (64-bit VA space)
16 * %g7 __pa(current->mm->pgd)
17 *
18 * The VPTE base value is completely magic, but note that
19 * few places in the kernel other than these TLB miss
20 * handlers know anything about the VPTE mechanism or
21 * how it works (see VPTE_SIZE, TASK_SIZE and PTRS_PER_PGD).
22 * Consider the 44-bit VADDR Ultra-I/II case as an example:
23 *
24 * VA[0 : (1<<43)] produce VPTE index [%g3 : 0]
25 * VA[0 : -(1<<43)] produce VPTE index [%g3-(1<<(43-PAGE_SHIFT+3)) : %g3]
26 *
27 * For Cheetah's 64-bit VADDR space this is:
28 *
29 * VA[0 : (1<<63)] produce VPTE index [%g3 : 0]
30 * VA[0 : -(1<<63)] produce VPTE index [%g3-(1<<(63-PAGE_SHIFT+3)) : %g3]
31 *
32 * If you're paying attention you'll notice that this means half of
33 * the VPTE table is above %g3 and half is below, low VA addresses
34 * map progressively upwards from %g3, and high VA addresses map
35 * progressively upwards towards %g3. This trick was needed to make
36 * the same 8 instruction handler work both for Spitfire/Blackbird's
37 * peculiar VA space hole configuration and the full 64-bit VA space
38 * one of Cheetah at the same time.
39 */
40
41/* Ways we can get here:
42 *
43 * 1) Nucleus loads and stores to/from PA-->VA direct mappings.
44 * 2) Nucleus loads and stores to/from vmalloc() areas.
45 * 3) User loads and stores.
46 * 4) User space accesses by nucleus at tl0
47 */
48
49#if PAGE_SHIFT == 13
50/*
51 * To compute vpte offset, we need to do ((addr >> 13) << 3),
52 * which can be optimized to (addr >> 10) if bits 10/11/12 can
53 * be guaranteed to be 0 ... mmu_context.h does guarantee this
54 * by only using 10 bits in the hwcontext value.
55 */
56#define CREATE_VPTE_OFFSET1(r1, r2) nop
57#define CREATE_VPTE_OFFSET2(r1, r2) \
58 srax r1, 10, r2
59#else
60#define CREATE_VPTE_OFFSET1(r1, r2) \
61 srax r1, PAGE_SHIFT, r2
62#define CREATE_VPTE_OFFSET2(r1, r2) \
63 sllx r2, 3, r2
64#endif
65
66/* DTLB ** ICACHE line 1: Quick user TLB misses */
67 mov TLB_SFSR, %g1
68 ldxa [%g1 + %g1] ASI_DMMU, %g4 ! Get TAG_ACCESS
69 andcc %g4, TAG_CONTEXT_BITS, %g0 ! From Nucleus?
70from_tl1_trap:
71 rdpr %tl, %g5 ! For TL==3 test
72 CREATE_VPTE_OFFSET1(%g4, %g6) ! Create VPTE offset
73 be,pn %xcc, kvmap ! Yep, special processing
74 CREATE_VPTE_OFFSET2(%g4, %g6) ! Create VPTE offset
75 cmp %g5, 4 ! Last trap level?
76
77/* DTLB ** ICACHE line 2: User finish + quick kernel TLB misses */
78 be,pn %xcc, longpath ! Yep, cannot risk VPTE miss
79 nop ! delay slot
80 ldxa [%g3 + %g6] ASI_S, %g5 ! Load VPTE
811: brgez,pn %g5, longpath ! Invalid, branch out
82 nop ! Delay-slot
839: stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB
84 retry ! Trap return
85 nop
86
87/* DTLB ** ICACHE line 3: winfixups+real_faults */
88longpath:
89 rdpr %pstate, %g5 ! Move into alternate globals
90 wrpr %g5, PSTATE_AG|PSTATE_MG, %pstate
91 rdpr %tl, %g4 ! See where we came from.
92 cmp %g4, 1 ! Is etrap/rtrap window fault?
93 mov TLB_TAG_ACCESS, %g4 ! Prepare for fault processing
94 ldxa [%g4] ASI_DMMU, %g5 ! Load faulting VA page
95 be,pt %xcc, sparc64_realfault_common ! Jump to normal fault handling
96 mov FAULT_CODE_DTLB, %g4 ! It was read from DTLB
97
98/* DTLB ** ICACHE line 4: Unused... */
99 ba,a,pt %xcc, winfix_trampoline ! Call window fixup code
100 nop
101 nop
102 nop
103 nop
104 nop
105 nop
106 nop
107
108#undef CREATE_VPTE_OFFSET1
109#undef CREATE_VPTE_OFFSET2
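The CREATE_VPTE_OFFSET macros removed here depend on the identity spelled out in the comment: with PAGE_SHIFT == 13 and bits 10..12 of the address guaranteed to be zero, ((addr >> 13) << 3) equals (addr >> 10), which saves one instruction in the miss handler. A standalone C check of that equivalence (the real macros use srax, the arithmetic shift, but the argument is identical):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Bits 10..12 are clear, as mmu_context.h guarantees by using only
	 * 10 bits of hardware context; bits 0..9 are discarded by both forms. */
	uint64_t addr = 0xfffff80012344000ULL;

	assert(((addr >> 13) << 3) == (addr >> 10));
	return 0;
}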
diff --git a/arch/sparc64/kernel/dtlb_miss.S b/arch/sparc64/kernel/dtlb_miss.S
new file mode 100644
index 000000000000..09a6a15a7105
--- /dev/null
+++ b/arch/sparc64/kernel/dtlb_miss.S
@@ -0,0 +1,39 @@
1/* DTLB ** ICACHE line 1: Context 0 check and TSB load */
2 ldxa [%g0] ASI_DMMU_TSB_8KB_PTR, %g1 ! Get TSB 8K pointer
3 ldxa [%g0] ASI_DMMU, %g6 ! Get TAG TARGET
4 srlx %g6, 48, %g5 ! Get context
5 sllx %g6, 22, %g6 ! Zero out context
6 brz,pn %g5, kvmap_dtlb ! Context 0 processing
7 srlx %g6, 22, %g6 ! Delay slot
8 TSB_LOAD_QUAD(%g1, %g4) ! Load TSB entry
9 cmp %g4, %g6 ! Compare TAG
10
11/* DTLB ** ICACHE line 2: TSB compare and TLB load */
12 bne,pn %xcc, tsb_miss_dtlb ! Miss
13 mov FAULT_CODE_DTLB, %g3
14 stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Load TLB
15 retry ! Trap done
16 nop
17 nop
18 nop
19 nop
20
21/* DTLB ** ICACHE line 3: */
22 nop
23 nop
24 nop
25 nop
26 nop
27 nop
28 nop
29 nop
30
31/* DTLB ** ICACHE line 4: */
32 nop
33 nop
34 nop
35 nop
36 nop
37 nop
38 nop
39 nop
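The new eight-instruction handler replaces the VPTE walk with a TSB probe: the hardware-computed 8K TSB pointer selects a {tag, PTE} pair, the tag derived from TAG TARGET (context in bits 63:48, virtual-address bits below) is compared, and on a hit the PTE goes straight into the TLB. A rough C model of that probe; the struct and function names are made up for illustration and do not exist in the kernel:

#include <stdint.h>

struct tsb_entry {
	uint64_t tag;	/* loaded together with pte by TSB_LOAD_QUAD */
	uint64_t pte;
};

/* Returns the PTE on a hit, 0 when the slow path must run. */
static uint64_t tsb_probe(const struct tsb_entry *ent, uint64_t tag_target)
{
	uint64_t context = tag_target >> 48;		/* srlx %g6, 48, %g5 */
	uint64_t tag     = (tag_target << 22) >> 22;	/* sllx/srlx pair: zero out context */

	if (context == 0)
		return 0;	/* context 0 access: handled by kvmap_dtlb */

	if (ent->tag != tag)
		return 0;	/* tsb_miss_dtlb: fall into the software TSB handler */

	return ent->pte;	/* stxa ... ASI_DTLB_DATA_IN; retry */
}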
diff --git a/arch/sparc64/kernel/ebus.c b/arch/sparc64/kernel/ebus.c
index 7991e919d8ab..c69504aa638f 100644
--- a/arch/sparc64/kernel/ebus.c
+++ b/arch/sparc64/kernel/ebus.c
@@ -277,10 +277,9 @@ static inline void *ebus_alloc(size_t size)
277{ 277{
278 void *mem; 278 void *mem;
279 279
280 mem = kmalloc(size, GFP_ATOMIC); 280 mem = kzalloc(size, GFP_ATOMIC);
281 if (!mem) 281 if (!mem)
282 panic("ebus_alloc: out of memory"); 282 panic("ebus_alloc: out of memory");
283 memset((char *)mem, 0, size);
284 return mem; 283 return mem;
285} 284}
286 285
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index a73553ae7e53..6d0b3ed77a02 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -50,7 +50,8 @@ do_fpdis:
50 add %g0, %g0, %g0 50 add %g0, %g0, %g0
51 ba,a,pt %xcc, rtrap_clr_l6 51 ba,a,pt %xcc, rtrap_clr_l6
52 52
531: ldub [%g6 + TI_FPSAVED], %g5 531: TRAP_LOAD_THREAD_REG(%g6, %g1)
54 ldub [%g6 + TI_FPSAVED], %g5
54 wr %g0, FPRS_FEF, %fprs 55 wr %g0, FPRS_FEF, %fprs
55 andcc %g5, FPRS_FEF, %g0 56 andcc %g5, FPRS_FEF, %g0
56 be,a,pt %icc, 1f 57 be,a,pt %icc, 1f
@@ -96,10 +97,22 @@ do_fpdis:
96 add %g6, TI_FPREGS + 0x80, %g1 97 add %g6, TI_FPREGS + 0x80, %g1
97 faddd %f0, %f2, %f4 98 faddd %f0, %f2, %f4
98 fmuld %f0, %f2, %f6 99 fmuld %f0, %f2, %f6
99 ldxa [%g3] ASI_DMMU, %g5 100
101661: ldxa [%g3] ASI_DMMU, %g5
102 .section .sun4v_1insn_patch, "ax"
103 .word 661b
104 ldxa [%g3] ASI_MMU, %g5
105 .previous
106
100 sethi %hi(sparc64_kern_sec_context), %g2 107 sethi %hi(sparc64_kern_sec_context), %g2
101 ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2 108 ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
102 stxa %g2, [%g3] ASI_DMMU 109
110661: stxa %g2, [%g3] ASI_DMMU
111 .section .sun4v_1insn_patch, "ax"
112 .word 661b
113 stxa %g2, [%g3] ASI_MMU
114 .previous
115
103 membar #Sync 116 membar #Sync
104 add %g6, TI_FPREGS + 0xc0, %g2 117 add %g6, TI_FPREGS + 0xc0, %g2
105 faddd %f0, %f2, %f8 118 faddd %f0, %f2, %f8
@@ -125,11 +138,23 @@ do_fpdis:
125 fzero %f32 138 fzero %f32
126 mov SECONDARY_CONTEXT, %g3 139 mov SECONDARY_CONTEXT, %g3
127 fzero %f34 140 fzero %f34
128 ldxa [%g3] ASI_DMMU, %g5 141
142661: ldxa [%g3] ASI_DMMU, %g5
143 .section .sun4v_1insn_patch, "ax"
144 .word 661b
145 ldxa [%g3] ASI_MMU, %g5
146 .previous
147
129 add %g6, TI_FPREGS, %g1 148 add %g6, TI_FPREGS, %g1
130 sethi %hi(sparc64_kern_sec_context), %g2 149 sethi %hi(sparc64_kern_sec_context), %g2
131 ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2 150 ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
132 stxa %g2, [%g3] ASI_DMMU 151
152661: stxa %g2, [%g3] ASI_DMMU
153 .section .sun4v_1insn_patch, "ax"
154 .word 661b
155 stxa %g2, [%g3] ASI_MMU
156 .previous
157
133 membar #Sync 158 membar #Sync
134 add %g6, TI_FPREGS + 0x40, %g2 159 add %g6, TI_FPREGS + 0x40, %g2
135 faddd %f32, %f34, %f36 160 faddd %f32, %f34, %f36
@@ -154,10 +179,22 @@ do_fpdis:
154 nop 179 nop
1553: mov SECONDARY_CONTEXT, %g3 1803: mov SECONDARY_CONTEXT, %g3
156 add %g6, TI_FPREGS, %g1 181 add %g6, TI_FPREGS, %g1
157 ldxa [%g3] ASI_DMMU, %g5 182
183661: ldxa [%g3] ASI_DMMU, %g5
184 .section .sun4v_1insn_patch, "ax"
185 .word 661b
186 ldxa [%g3] ASI_MMU, %g5
187 .previous
188
158 sethi %hi(sparc64_kern_sec_context), %g2 189 sethi %hi(sparc64_kern_sec_context), %g2
159 ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2 190 ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
160 stxa %g2, [%g3] ASI_DMMU 191
192661: stxa %g2, [%g3] ASI_DMMU
193 .section .sun4v_1insn_patch, "ax"
194 .word 661b
195 stxa %g2, [%g3] ASI_MMU
196 .previous
197
161 membar #Sync 198 membar #Sync
162 mov 0x40, %g2 199 mov 0x40, %g2
163 membar #Sync 200 membar #Sync
@@ -168,7 +205,13 @@ do_fpdis:
168 ldda [%g1 + %g2] ASI_BLK_S, %f48 205 ldda [%g1 + %g2] ASI_BLK_S, %f48
169 membar #Sync 206 membar #Sync
170fpdis_exit: 207fpdis_exit:
171 stxa %g5, [%g3] ASI_DMMU 208
209661: stxa %g5, [%g3] ASI_DMMU
210 .section .sun4v_1insn_patch, "ax"
211 .word 661b
212 stxa %g5, [%g3] ASI_MMU
213 .previous
214
172 membar #Sync 215 membar #Sync
173fpdis_exit2: 216fpdis_exit2:
174 wr %g7, 0, %gsr 217 wr %g7, 0, %gsr
@@ -189,6 +232,7 @@ fp_other_bounce:
189 .globl do_fpother_check_fitos 232 .globl do_fpother_check_fitos
190 .align 32 233 .align 32
191do_fpother_check_fitos: 234do_fpother_check_fitos:
235 TRAP_LOAD_THREAD_REG(%g6, %g1)
192 sethi %hi(fp_other_bounce - 4), %g7 236 sethi %hi(fp_other_bounce - 4), %g7
193 or %g7, %lo(fp_other_bounce - 4), %g7 237 or %g7, %lo(fp_other_bounce - 4), %g7
194 238
@@ -312,6 +356,7 @@ fitos_emul_fini:
312 .globl do_fptrap 356 .globl do_fptrap
313 .align 32 357 .align 32
314do_fptrap: 358do_fptrap:
359 TRAP_LOAD_THREAD_REG(%g6, %g1)
315 stx %fsr, [%g6 + TI_XFSR] 360 stx %fsr, [%g6 + TI_XFSR]
316do_fptrap_after_fsr: 361do_fptrap_after_fsr:
317 ldub [%g6 + TI_FPSAVED], %g3 362 ldub [%g6 + TI_FPSAVED], %g3
@@ -321,10 +366,22 @@ do_fptrap_after_fsr:
321 rd %gsr, %g3 366 rd %gsr, %g3
322 stx %g3, [%g6 + TI_GSR] 367 stx %g3, [%g6 + TI_GSR]
323 mov SECONDARY_CONTEXT, %g3 368 mov SECONDARY_CONTEXT, %g3
324 ldxa [%g3] ASI_DMMU, %g5 369
370661: ldxa [%g3] ASI_DMMU, %g5
371 .section .sun4v_1insn_patch, "ax"
372 .word 661b
373 ldxa [%g3] ASI_MMU, %g5
374 .previous
375
325 sethi %hi(sparc64_kern_sec_context), %g2 376 sethi %hi(sparc64_kern_sec_context), %g2
326 ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2 377 ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
327 stxa %g2, [%g3] ASI_DMMU 378
379661: stxa %g2, [%g3] ASI_DMMU
380 .section .sun4v_1insn_patch, "ax"
381 .word 661b
382 stxa %g2, [%g3] ASI_MMU
383 .previous
384
328 membar #Sync 385 membar #Sync
329 add %g6, TI_FPREGS, %g2 386 add %g6, TI_FPREGS, %g2
330 andcc %g1, FPRS_DL, %g0 387 andcc %g1, FPRS_DL, %g0
@@ -339,7 +396,13 @@ do_fptrap_after_fsr:
339 stda %f48, [%g2 + %g3] ASI_BLK_S 396 stda %f48, [%g2 + %g3] ASI_BLK_S
3405: mov SECONDARY_CONTEXT, %g1 3975: mov SECONDARY_CONTEXT, %g1
341 membar #Sync 398 membar #Sync
342 stxa %g5, [%g1] ASI_DMMU 399
400661: stxa %g5, [%g1] ASI_DMMU
401 .section .sun4v_1insn_patch, "ax"
402 .word 661b
403 stxa %g5, [%g1] ASI_MMU
404 .previous
405
343 membar #Sync 406 membar #Sync
344 ba,pt %xcc, etrap 407 ba,pt %xcc, etrap
345 wr %g0, 0, %fprs 408 wr %g0, 0, %fprs
@@ -353,8 +416,6 @@ do_fptrap_after_fsr:
353 * 416 *
354 * With this method we can do most of the cross-call tlb/cache 417 * With this method we can do most of the cross-call tlb/cache
355 * flushing very quickly. 418 * flushing very quickly.
356 *
357 * Current CPU's IRQ worklist table is locked into %g6, don't touch.
358 */ 419 */
359 .text 420 .text
360 .align 32 421 .align 32
@@ -378,6 +439,8 @@ do_ivec:
378 sllx %g2, %g4, %g2 439 sllx %g2, %g4, %g2
379 sllx %g4, 2, %g4 440 sllx %g4, 2, %g4
380 441
442 TRAP_LOAD_IRQ_WORK(%g6, %g1)
443
381 lduw [%g6 + %g4], %g5 /* g5 = irq_work(cpu, pil) */ 444 lduw [%g6 + %g4], %g5 /* g5 = irq_work(cpu, pil) */
382 stw %g5, [%g3 + 0x00] /* bucket->irq_chain = g5 */ 445 stw %g5, [%g3 + 0x00] /* bucket->irq_chain = g5 */
383 stw %g3, [%g6 + %g4] /* irq_work(cpu, pil) = bucket */ 446 stw %g3, [%g6 + %g4] /* irq_work(cpu, pil) = bucket */
@@ -399,76 +462,6 @@ do_ivec_xcall:
3991: jmpl %g3, %g0 4621: jmpl %g3, %g0
400 nop 463 nop
401 464
402 .globl save_alternate_globals
403save_alternate_globals: /* %o0 = save_area */
404 rdpr %pstate, %o5
405 andn %o5, PSTATE_IE, %o1
406 wrpr %o1, PSTATE_AG, %pstate
407 stx %g0, [%o0 + 0x00]
408 stx %g1, [%o0 + 0x08]
409 stx %g2, [%o0 + 0x10]
410 stx %g3, [%o0 + 0x18]
411 stx %g4, [%o0 + 0x20]
412 stx %g5, [%o0 + 0x28]
413 stx %g6, [%o0 + 0x30]
414 stx %g7, [%o0 + 0x38]
415 wrpr %o1, PSTATE_IG, %pstate
416 stx %g0, [%o0 + 0x40]
417 stx %g1, [%o0 + 0x48]
418 stx %g2, [%o0 + 0x50]
419 stx %g3, [%o0 + 0x58]
420 stx %g4, [%o0 + 0x60]
421 stx %g5, [%o0 + 0x68]
422 stx %g6, [%o0 + 0x70]
423 stx %g7, [%o0 + 0x78]
424 wrpr %o1, PSTATE_MG, %pstate
425 stx %g0, [%o0 + 0x80]
426 stx %g1, [%o0 + 0x88]
427 stx %g2, [%o0 + 0x90]
428 stx %g3, [%o0 + 0x98]
429 stx %g4, [%o0 + 0xa0]
430 stx %g5, [%o0 + 0xa8]
431 stx %g6, [%o0 + 0xb0]
432 stx %g7, [%o0 + 0xb8]
433 wrpr %o5, 0x0, %pstate
434 retl
435 nop
436
437 .globl restore_alternate_globals
438restore_alternate_globals: /* %o0 = save_area */
439 rdpr %pstate, %o5
440 andn %o5, PSTATE_IE, %o1
441 wrpr %o1, PSTATE_AG, %pstate
442 ldx [%o0 + 0x00], %g0
443 ldx [%o0 + 0x08], %g1
444 ldx [%o0 + 0x10], %g2
445 ldx [%o0 + 0x18], %g3
446 ldx [%o0 + 0x20], %g4
447 ldx [%o0 + 0x28], %g5
448 ldx [%o0 + 0x30], %g6
449 ldx [%o0 + 0x38], %g7
450 wrpr %o1, PSTATE_IG, %pstate
451 ldx [%o0 + 0x40], %g0
452 ldx [%o0 + 0x48], %g1
453 ldx [%o0 + 0x50], %g2
454 ldx [%o0 + 0x58], %g3
455 ldx [%o0 + 0x60], %g4
456 ldx [%o0 + 0x68], %g5
457 ldx [%o0 + 0x70], %g6
458 ldx [%o0 + 0x78], %g7
459 wrpr %o1, PSTATE_MG, %pstate
460 ldx [%o0 + 0x80], %g0
461 ldx [%o0 + 0x88], %g1
462 ldx [%o0 + 0x90], %g2
463 ldx [%o0 + 0x98], %g3
464 ldx [%o0 + 0xa0], %g4
465 ldx [%o0 + 0xa8], %g5
466 ldx [%o0 + 0xb0], %g6
467 ldx [%o0 + 0xb8], %g7
468 wrpr %o5, 0x0, %pstate
469 retl
470 nop
471
472 .globl getcc, setcc 465 .globl getcc, setcc
473getcc: 466getcc:
474 ldx [%o0 + PT_V9_TSTATE], %o1 467 ldx [%o0 + PT_V9_TSTATE], %o1
@@ -488,9 +481,24 @@ setcc:
488 retl 481 retl
489 stx %o1, [%o0 + PT_V9_TSTATE] 482 stx %o1, [%o0 + PT_V9_TSTATE]
490 483
491 .globl utrap, utrap_ill 484 .globl utrap_trap
492utrap: brz,pn %g1, etrap 485utrap_trap: /* %g3=handler,%g4=level */
486 TRAP_LOAD_THREAD_REG(%g6, %g1)
487 ldx [%g6 + TI_UTRAPS], %g1
488 brnz,pt %g1, invoke_utrap
493 nop 489 nop
490
491 ba,pt %xcc, etrap
492 rd %pc, %g7
493 mov %l4, %o1
494 call bad_trap
495 add %sp, PTREGS_OFF, %o0
496 ba,pt %xcc, rtrap
497 clr %l6
498
499invoke_utrap:
500 sllx %g3, 3, %g3
501 ldx [%g1 + %g3], %g1
494 save %sp, -128, %sp 502 save %sp, -128, %sp
495 rdpr %tstate, %l6 503 rdpr %tstate, %l6
496 rdpr %cwp, %l7 504 rdpr %cwp, %l7
@@ -500,17 +508,6 @@ utrap: brz,pn %g1, etrap
500 rdpr %tnpc, %l7 508 rdpr %tnpc, %l7
501 wrpr %g1, 0, %tnpc 509 wrpr %g1, 0, %tnpc
502 done 510 done
503utrap_ill:
504 call bad_trap
505 add %sp, PTREGS_OFF, %o0
506 ba,pt %xcc, rtrap
507 clr %l6
508
509 /* XXX Here is stuff we still need to write... -DaveM XXX */
510 .globl netbsd_syscall
511netbsd_syscall:
512 retl
513 nop
514 511
515 /* We need to carefully read the error status, ACK 512 /* We need to carefully read the error status, ACK
516 * the errors, prevent recursive traps, and pass the 513 * the errors, prevent recursive traps, and pass the
@@ -1001,7 +998,7 @@ dcpe_icpe_tl1_common:
1001 * %g3: scratch 998 * %g3: scratch
1002 * %g4: AFSR 999 * %g4: AFSR
1003 * %g5: AFAR 1000 * %g5: AFAR
1004 * %g6: current thread ptr 1001 * %g6: unused, will have current thread ptr after etrap
1005 * %g7: scratch 1002 * %g7: scratch
1006 */ 1003 */
1007__cheetah_log_error: 1004__cheetah_log_error:
@@ -1539,13 +1536,14 @@ ret_from_syscall:
1539 1536
15401: b,pt %xcc, ret_sys_call 15371: b,pt %xcc, ret_sys_call
1541 ldx [%sp + PTREGS_OFF + PT_V9_I0], %o0 1538 ldx [%sp + PTREGS_OFF + PT_V9_I0], %o0
1542sparc_exit: wrpr %g0, (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV), %pstate 1539sparc_exit: rdpr %pstate, %g2
1540 wrpr %g2, PSTATE_IE, %pstate
1543 rdpr %otherwin, %g1 1541 rdpr %otherwin, %g1
1544 rdpr %cansave, %g3 1542 rdpr %cansave, %g3
1545 add %g3, %g1, %g3 1543 add %g3, %g1, %g3
1546 wrpr %g3, 0x0, %cansave 1544 wrpr %g3, 0x0, %cansave
1547 wrpr %g0, 0x0, %otherwin 1545 wrpr %g0, 0x0, %otherwin
1548 wrpr %g0, (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV | PSTATE_IE), %pstate 1546 wrpr %g2, 0x0, %pstate
1549 ba,pt %xcc, sys_exit 1547 ba,pt %xcc, sys_exit
1550 stb %g0, [%g6 + TI_WSAVED] 1548 stb %g0, [%g6 + TI_WSAVED]
1551 1549
@@ -1690,3 +1688,138 @@ __flushw_user:
1690 restore %g0, %g0, %g0 1688 restore %g0, %g0, %g0
16912: retl 16892: retl
1692 nop 1690 nop
1691
1692#ifdef CONFIG_SMP
1693 .globl hard_smp_processor_id
1694hard_smp_processor_id:
1695#endif
1696 .globl real_hard_smp_processor_id
1697real_hard_smp_processor_id:
1698 __GET_CPUID(%o0)
1699 retl
1700 nop
1701
1702 /* %o0: devhandle
1703 * %o1: devino
1704 *
1705 * returns %o0: sysino
1706 */
1707 .globl sun4v_devino_to_sysino
1708sun4v_devino_to_sysino:
1709 mov HV_FAST_INTR_DEVINO2SYSINO, %o5
1710 ta HV_FAST_TRAP
1711 retl
1712 mov %o1, %o0
1713
1714 /* %o0: sysino
1715 *
1716 * returns %o0: intr_enabled (HV_INTR_{DISABLED,ENABLED})
1717 */
1718 .globl sun4v_intr_getenabled
1719sun4v_intr_getenabled:
1720 mov HV_FAST_INTR_GETENABLED, %o5
1721 ta HV_FAST_TRAP
1722 retl
1723 mov %o1, %o0
1724
1725 /* %o0: sysino
1726 * %o1: intr_enabled (HV_INTR_{DISABLED,ENABLED})
1727 */
1728 .globl sun4v_intr_setenabled
1729sun4v_intr_setenabled:
1730 mov HV_FAST_INTR_SETENABLED, %o5
1731 ta HV_FAST_TRAP
1732 retl
1733 nop
1734
1735 /* %o0: sysino
1736 *
1737 * returns %o0: intr_state (HV_INTR_STATE_*)
1738 */
1739 .globl sun4v_intr_getstate
1740sun4v_intr_getstate:
1741 mov HV_FAST_INTR_GETSTATE, %o5
1742 ta HV_FAST_TRAP
1743 retl
1744 mov %o1, %o0
1745
1746 /* %o0: sysino
1747 * %o1: intr_state (HV_INTR_STATE_*)
1748 */
1749 .globl sun4v_intr_setstate
1750sun4v_intr_setstate:
1751 mov HV_FAST_INTR_SETSTATE, %o5
1752 ta HV_FAST_TRAP
1753 retl
1754 nop
1755
1756 /* %o0: sysino
1757 *
1758 * returns %o0: cpuid
1759 */
1760 .globl sun4v_intr_gettarget
1761sun4v_intr_gettarget:
1762 mov HV_FAST_INTR_GETTARGET, %o5
1763 ta HV_FAST_TRAP
1764 retl
1765 mov %o1, %o0
1766
1767 /* %o0: sysino
1768 * %o1: cpuid
1769 */
1770 .globl sun4v_intr_settarget
1771sun4v_intr_settarget:
1772 mov HV_FAST_INTR_SETTARGET, %o5
1773 ta HV_FAST_TRAP
1774 retl
1775 nop
1776
1777 /* %o0: type
1778 * %o1: queue paddr
1779 * %o2: num queue entries
1780 *
1781 * returns %o0: status
1782 */
1783 .globl sun4v_cpu_qconf
1784sun4v_cpu_qconf:
1785 mov HV_FAST_CPU_QCONF, %o5
1786 ta HV_FAST_TRAP
1787 retl
1788 nop
1789
1790 /* returns %o0: status
1791 */
1792 .globl sun4v_cpu_yield
1793sun4v_cpu_yield:
1794 mov HV_FAST_CPU_YIELD, %o5
1795 ta HV_FAST_TRAP
1796 retl
1797 nop
1798
1799 /* %o0: num cpus in cpu list
1800 * %o1: cpu list paddr
1801 * %o2: mondo block paddr
1802 *
1803 * returns %o0: status
1804 */
1805 .globl sun4v_cpu_mondo_send
1806sun4v_cpu_mondo_send:
1807 mov HV_FAST_CPU_MONDO_SEND, %o5
1808 ta HV_FAST_TRAP
1809 retl
1810 nop
1811
1812 /* %o0: CPU ID
1813 *
1814 * returns %o0: -status if status non-zero, else
1815 * %o0: cpu state as HV_CPU_STATE_*
1816 */
1817 .globl sun4v_cpu_state
1818sun4v_cpu_state:
1819 mov HV_FAST_CPU_STATE, %o5
1820 ta HV_FAST_TRAP
1821 brnz,pn %o0, 1f
1822 sub %g0, %o0, %o0
1823 mov %o1, %o0
18241: retl
1825 nop
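Every "661:" label followed by a ".section .sun4v_1insn_patch" block in this file records the address of a sun4u-only instruction together with a one-instruction sun4v replacement; boot code can then walk that section and rewrite each site when the hypervisor ASIs must be used. A sketch of what such a patch walk could look like in C; the entry layout is inferred from the ".word 661b" plus replacement-instruction pairs above, and the structure and routine names here are assumptions, not the kernel's own:

#include <stdint.h>

/* One record per patched instruction, matching the 8 bytes emitted by
 * ".word 661b" followed by the replacement instruction. */
struct sun4v_1insn_patch_entry {
	uint32_t addr;	/* address of the original (sun4u) instruction */
	uint32_t insn;	/* replacement (sun4v) instruction encoding */
};

static void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *start,
				    struct sun4v_1insn_patch_entry *end)
{
	while (start < end) {
		uint32_t *site = (uint32_t *)(uintptr_t)start->addr;

		*site = start->insn;	/* overwrite the sun4u instruction */
		/* the real code would also flush the I-cache line here */
		start++;
	}
}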
diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S
index 0d8eba21111b..149383835c25 100644
--- a/arch/sparc64/kernel/etrap.S
+++ b/arch/sparc64/kernel/etrap.S
@@ -31,6 +31,7 @@
31 .globl etrap, etrap_irq, etraptl1 31 .globl etrap, etrap_irq, etraptl1
32etrap: rdpr %pil, %g2 32etrap: rdpr %pil, %g2
33etrap_irq: 33etrap_irq:
34 TRAP_LOAD_THREAD_REG(%g6, %g1)
34 rdpr %tstate, %g1 35 rdpr %tstate, %g1
35 sllx %g2, 20, %g3 36 sllx %g2, 20, %g3
36 andcc %g1, TSTATE_PRIV, %g0 37 andcc %g1, TSTATE_PRIV, %g0
@@ -54,7 +55,31 @@ etrap_irq:
54 rd %y, %g3 55 rd %y, %g3
55 stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TNPC] 56 stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TNPC]
56 st %g3, [%g2 + STACKFRAME_SZ + PT_V9_Y] 57 st %g3, [%g2 + STACKFRAME_SZ + PT_V9_Y]
57 save %g2, -STACK_BIAS, %sp ! Ordering here is critical 58
59 rdpr %cansave, %g1
60 brnz,pt %g1, etrap_save
61 nop
62
63 rdpr %cwp, %g1
64 add %g1, 2, %g1
65 wrpr %g1, %cwp
66 be,pt %xcc, etrap_user_spill
67 mov ASI_AIUP, %g3
68
69 rdpr %otherwin, %g3
70 brz %g3, etrap_kernel_spill
71 mov ASI_AIUS, %g3
72
73etrap_user_spill:
74
75 wr %g3, 0x0, %asi
76 ldx [%g6 + TI_FLAGS], %g3
77 and %g3, _TIF_32BIT, %g3
78 brnz,pt %g3, etrap_user_spill_32bit
79 nop
80 ba,a,pt %xcc, etrap_user_spill_64bit
81
82etrap_save: save %g2, -STACK_BIAS, %sp
58 mov %g6, %l6 83 mov %g6, %l6
59 84
60 bne,pn %xcc, 3f 85 bne,pn %xcc, 3f
@@ -70,42 +95,56 @@ etrap_irq:
70 wrpr %g2, 0, %wstate 95 wrpr %g2, 0, %wstate
71 sethi %hi(sparc64_kern_pri_context), %g2 96 sethi %hi(sparc64_kern_pri_context), %g2
72 ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3 97 ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3
73 stxa %g3, [%l4] ASI_DMMU 98
74 flush %l6 99661: stxa %g3, [%l4] ASI_DMMU
75 wr %g0, ASI_AIUS, %asi 100 .section .sun4v_1insn_patch, "ax"
762: wrpr %g0, 0x0, %tl 101 .word 661b
77 mov %g4, %l4 102 stxa %g3, [%l4] ASI_MMU
103 .previous
104
105 sethi %hi(KERNBASE), %l4
106 flush %l4
107 mov ASI_AIUS, %l7
1082: mov %g4, %l4
78 mov %g5, %l5 109 mov %g5, %l5
110 add %g7, 4, %l2
111
112 /* Go to trap time globals so we can save them. */
113661: wrpr %g0, ETRAP_PSTATE1, %pstate
114 .section .sun4v_1insn_patch, "ax"
115 .word 661b
116 SET_GL(0)
117 .previous
79 118
80 mov %g7, %l2
81 wrpr %g0, ETRAP_PSTATE1, %pstate
82 stx %g1, [%sp + PTREGS_OFF + PT_V9_G1] 119 stx %g1, [%sp + PTREGS_OFF + PT_V9_G1]
83 stx %g2, [%sp + PTREGS_OFF + PT_V9_G2] 120 stx %g2, [%sp + PTREGS_OFF + PT_V9_G2]
121 sllx %l7, 24, %l7
84 stx %g3, [%sp + PTREGS_OFF + PT_V9_G3] 122 stx %g3, [%sp + PTREGS_OFF + PT_V9_G3]
123 rdpr %cwp, %l0
85 stx %g4, [%sp + PTREGS_OFF + PT_V9_G4] 124 stx %g4, [%sp + PTREGS_OFF + PT_V9_G4]
86 stx %g5, [%sp + PTREGS_OFF + PT_V9_G5] 125 stx %g5, [%sp + PTREGS_OFF + PT_V9_G5]
87 stx %g6, [%sp + PTREGS_OFF + PT_V9_G6] 126 stx %g6, [%sp + PTREGS_OFF + PT_V9_G6]
88
89 stx %g7, [%sp + PTREGS_OFF + PT_V9_G7] 127 stx %g7, [%sp + PTREGS_OFF + PT_V9_G7]
128 or %l7, %l0, %l7
129 sethi %hi(TSTATE_RMO | TSTATE_PEF), %l0
130 or %l7, %l0, %l7
131 wrpr %l2, %tnpc
132 wrpr %l7, (TSTATE_PRIV | TSTATE_IE), %tstate
90 stx %i0, [%sp + PTREGS_OFF + PT_V9_I0] 133 stx %i0, [%sp + PTREGS_OFF + PT_V9_I0]
91 stx %i1, [%sp + PTREGS_OFF + PT_V9_I1] 134 stx %i1, [%sp + PTREGS_OFF + PT_V9_I1]
92 stx %i2, [%sp + PTREGS_OFF + PT_V9_I2] 135 stx %i2, [%sp + PTREGS_OFF + PT_V9_I2]
93 stx %i3, [%sp + PTREGS_OFF + PT_V9_I3] 136 stx %i3, [%sp + PTREGS_OFF + PT_V9_I3]
94 stx %i4, [%sp + PTREGS_OFF + PT_V9_I4] 137 stx %i4, [%sp + PTREGS_OFF + PT_V9_I4]
95 stx %i5, [%sp + PTREGS_OFF + PT_V9_I5] 138 stx %i5, [%sp + PTREGS_OFF + PT_V9_I5]
96
97 stx %i6, [%sp + PTREGS_OFF + PT_V9_I6] 139 stx %i6, [%sp + PTREGS_OFF + PT_V9_I6]
98 stx %i7, [%sp + PTREGS_OFF + PT_V9_I7]
99 wrpr %g0, ETRAP_PSTATE2, %pstate
100 mov %l6, %g6 140 mov %l6, %g6
101#ifdef CONFIG_SMP 141 stx %i7, [%sp + PTREGS_OFF + PT_V9_I7]
102 mov TSB_REG, %g3 142 LOAD_PER_CPU_BASE(%g5, %g6, %g4, %g3, %l1)
103 ldxa [%g3] ASI_IMMU, %g5 143 ldx [%g6 + TI_TASK], %g4
104#endif 144 done
105 jmpl %l2 + 0x4, %g0
106 ldx [%g6 + TI_TASK], %g4
107 145
1083: ldub [%l6 + TI_FPDEPTH], %l5 1463: mov ASI_P, %l7
147 ldub [%l6 + TI_FPDEPTH], %l5
109 add %l6, TI_FPSAVED + 1, %l4 148 add %l6, TI_FPSAVED + 1, %l4
110 srl %l5, 1, %l3 149 srl %l5, 1, %l3
111 add %l5, 2, %l5 150 add %l5, 2, %l5
@@ -125,6 +164,7 @@ etraptl1: /* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself.
125 * 0x58 TL4's TT 164 * 0x58 TL4's TT
126 * 0x60 TL 165 * 0x60 TL
127 */ 166 */
167 TRAP_LOAD_THREAD_REG(%g6, %g1)
128 sub %sp, ((4 * 8) * 4) + 8, %g2 168 sub %sp, ((4 * 8) * 4) + 8, %g2
129 rdpr %tl, %g1 169 rdpr %tl, %g1
130 170
@@ -148,6 +188,11 @@ etraptl1: /* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself.
148 rdpr %tt, %g3 188 rdpr %tt, %g3
149 stx %g3, [%g2 + STACK_BIAS + 0x38] 189 stx %g3, [%g2 + STACK_BIAS + 0x38]
150 190
191 sethi %hi(is_sun4v), %g3
192 lduw [%g3 + %lo(is_sun4v)], %g3
193 brnz,pn %g3, finish_tl1_capture
194 nop
195
151 wrpr %g0, 3, %tl 196 wrpr %g0, 3, %tl
152 rdpr %tstate, %g3 197 rdpr %tstate, %g3
153 stx %g3, [%g2 + STACK_BIAS + 0x40] 198 stx %g3, [%g2 + STACK_BIAS + 0x40]
@@ -168,91 +213,20 @@ etraptl1: /* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself.
168 rdpr %tt, %g3 213 rdpr %tt, %g3
169 stx %g3, [%g2 + STACK_BIAS + 0x78] 214 stx %g3, [%g2 + STACK_BIAS + 0x78]
170 215
171 wrpr %g1, %tl
172 stx %g1, [%g2 + STACK_BIAS + 0x80] 216 stx %g1, [%g2 + STACK_BIAS + 0x80]
173 217
218finish_tl1_capture:
219 wrpr %g0, 1, %tl
220661: nop
221 .section .sun4v_1insn_patch, "ax"
222 .word 661b
223 SET_GL(1)
224 .previous
225
174 rdpr %tstate, %g1 226 rdpr %tstate, %g1
175 sub %g2, STACKFRAME_SZ + TRACEREG_SZ - STACK_BIAS, %g2 227 sub %g2, STACKFRAME_SZ + TRACEREG_SZ - STACK_BIAS, %g2
176 ba,pt %xcc, 1b 228 ba,pt %xcc, 1b
177 andcc %g1, TSTATE_PRIV, %g0 229 andcc %g1, TSTATE_PRIV, %g0
178 230
179 .align 64
180 .globl scetrap
181scetrap: rdpr %pil, %g2
182 rdpr %tstate, %g1
183 sllx %g2, 20, %g3
184 andcc %g1, TSTATE_PRIV, %g0
185 or %g1, %g3, %g1
186 bne,pn %xcc, 1f
187 sub %sp, (STACKFRAME_SZ+TRACEREG_SZ-STACK_BIAS), %g2
188 wrpr %g0, 7, %cleanwin
189
190 sllx %g1, 51, %g3
191 sethi %hi(TASK_REGOFF), %g2
192 or %g2, %lo(TASK_REGOFF), %g2
193 brlz,pn %g3, 1f
194 add %g6, %g2, %g2
195 wr %g0, 0, %fprs
1961: rdpr %tpc, %g3
197 stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TSTATE]
198
199 rdpr %tnpc, %g1
200 stx %g3, [%g2 + STACKFRAME_SZ + PT_V9_TPC]
201 stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TNPC]
202 save %g2, -STACK_BIAS, %sp ! Ordering here is critical
203 mov %g6, %l6
204 bne,pn %xcc, 2f
205 mov ASI_P, %l7
206 rdpr %canrestore, %g3
207
208 rdpr %wstate, %g2
209 wrpr %g0, 0, %canrestore
210 sll %g2, 3, %g2
211 mov PRIMARY_CONTEXT, %l4
212 wrpr %g3, 0, %otherwin
213 wrpr %g2, 0, %wstate
214 sethi %hi(sparc64_kern_pri_context), %g2
215 ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3
216 stxa %g3, [%l4] ASI_DMMU
217 flush %l6
218
219 mov ASI_AIUS, %l7
2202: mov %g4, %l4
221 mov %g5, %l5
222 add %g7, 0x4, %l2
223 wrpr %g0, ETRAP_PSTATE1, %pstate
224 stx %g1, [%sp + PTREGS_OFF + PT_V9_G1]
225 stx %g2, [%sp + PTREGS_OFF + PT_V9_G2]
226 sllx %l7, 24, %l7
227
228 stx %g3, [%sp + PTREGS_OFF + PT_V9_G3]
229 rdpr %cwp, %l0
230 stx %g4, [%sp + PTREGS_OFF + PT_V9_G4]
231 stx %g5, [%sp + PTREGS_OFF + PT_V9_G5]
232 stx %g6, [%sp + PTREGS_OFF + PT_V9_G6]
233 stx %g7, [%sp + PTREGS_OFF + PT_V9_G7]
234 or %l7, %l0, %l7
235 sethi %hi(TSTATE_RMO | TSTATE_PEF), %l0
236
237 or %l7, %l0, %l7
238 wrpr %l2, %tnpc
239 wrpr %l7, (TSTATE_PRIV | TSTATE_IE), %tstate
240 stx %i0, [%sp + PTREGS_OFF + PT_V9_I0]
241 stx %i1, [%sp + PTREGS_OFF + PT_V9_I1]
242 stx %i2, [%sp + PTREGS_OFF + PT_V9_I2]
243 stx %i3, [%sp + PTREGS_OFF + PT_V9_I3]
244 stx %i4, [%sp + PTREGS_OFF + PT_V9_I4]
245
246 stx %i5, [%sp + PTREGS_OFF + PT_V9_I5]
247 stx %i6, [%sp + PTREGS_OFF + PT_V9_I6]
248 mov %l6, %g6
249 stx %i7, [%sp + PTREGS_OFF + PT_V9_I7]
250#ifdef CONFIG_SMP
251 mov TSB_REG, %g3
252 ldxa [%g3] ASI_IMMU, %g5
253#endif
254 ldx [%g6 + TI_TASK], %g4
255 done
256
257#undef TASK_REGOFF 231#undef TASK_REGOFF
258#undef ETRAP_PSTATE1 232#undef ETRAP_PSTATE1
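The reworked etrap entry no longer assumes a register window is free: it reads %cansave, and only when that is zero does it rotate %cwp forward and spill a window first, choosing the user or kernel spill handler (and ASI_AIUP versus ASI_AIUS) based on where the trap came from and whether %otherwin still holds user windows. A C-style sketch of just that decision; the type and names are invented for illustration:

/* Illustrative only: mirrors the branch structure added to etrap above. */
struct trap_window_state {
	unsigned int cansave;	/* windows free to 'save' into */
	unsigned int otherwin;	/* windows still owned by the other (user) side */
	int          from_user;	/* TSTATE_PRIV was clear at trap time */
};

enum etrap_path { ETRAP_SAVE, ETRAP_USER_SPILL, ETRAP_KERNEL_SPILL };

static enum etrap_path etrap_choose_path(const struct trap_window_state *w)
{
	if (w->cansave != 0)
		return ETRAP_SAVE;		/* brnz,pt %g1, etrap_save */

	/* No free window: %cwp is stepped forward and one window is spilled. */
	if (w->from_user)
		return ETRAP_USER_SPILL;	/* be,pt %xcc, etrap_user_spill (ASI_AIUP) */

	if (w->otherwin == 0)
		return ETRAP_KERNEL_SPILL;	/* brz %g3, etrap_kernel_spill */

	return ETRAP_USER_SPILL;		/* user windows still live: spill with ASI_AIUS */
}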
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index b49dcd4504b0..3eadac5e171e 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -26,6 +26,7 @@
26#include <asm/head.h> 26#include <asm/head.h>
27#include <asm/ttable.h> 27#include <asm/ttable.h>
28#include <asm/mmu.h> 28#include <asm/mmu.h>
29#include <asm/cpudata.h>
29 30
 30/* This section from _start to sparc64_boot_end should fit into 31 * 0x0000000000404000 to 0x0000000000408000.
31 * 0x0000000000404000 to 0x0000000000408000. 32 * 0x0000000000404000 to 0x0000000000408000.
@@ -94,12 +95,17 @@ sparc64_boot:
94 wrpr %g1, 0x0, %pstate 95 wrpr %g1, 0x0, %pstate
95 ba,a,pt %xcc, 1f 96 ba,a,pt %xcc, 1f
96 97
97 .globl prom_finddev_name, prom_chosen_path 98 .globl prom_finddev_name, prom_chosen_path, prom_root_node
98 .globl prom_getprop_name, prom_mmu_name 99 .globl prom_getprop_name, prom_mmu_name, prom_peer_name
99 .globl prom_callmethod_name, prom_translate_name 100 .globl prom_callmethod_name, prom_translate_name, prom_root_compatible
100 .globl prom_map_name, prom_unmap_name, prom_mmu_ihandle_cache 101 .globl prom_map_name, prom_unmap_name, prom_mmu_ihandle_cache
101 .globl prom_boot_mapped_pc, prom_boot_mapping_mode 102 .globl prom_boot_mapped_pc, prom_boot_mapping_mode
102 .globl prom_boot_mapping_phys_high, prom_boot_mapping_phys_low 103 .globl prom_boot_mapping_phys_high, prom_boot_mapping_phys_low
104 .globl is_sun4v
105prom_peer_name:
106 .asciz "peer"
107prom_compatible_name:
108 .asciz "compatible"
103prom_finddev_name: 109prom_finddev_name:
104 .asciz "finddevice" 110 .asciz "finddevice"
105prom_chosen_path: 111prom_chosen_path:
@@ -116,7 +122,13 @@ prom_map_name:
116 .asciz "map" 122 .asciz "map"
117prom_unmap_name: 123prom_unmap_name:
118 .asciz "unmap" 124 .asciz "unmap"
125prom_sun4v_name:
126 .asciz "sun4v"
119 .align 4 127 .align 4
128prom_root_compatible:
129 .skip 64
130prom_root_node:
131 .word 0
120prom_mmu_ihandle_cache: 132prom_mmu_ihandle_cache:
121 .word 0 133 .word 0
122prom_boot_mapped_pc: 134prom_boot_mapped_pc:
@@ -128,8 +140,54 @@ prom_boot_mapping_phys_high:
128 .xword 0 140 .xword 0
129prom_boot_mapping_phys_low: 141prom_boot_mapping_phys_low:
130 .xword 0 142 .xword 0
143is_sun4v:
144 .word 0
1311: 1451:
132 rd %pc, %l0 146 rd %pc, %l0
147
148 mov (1b - prom_peer_name), %l1
149 sub %l0, %l1, %l1
150 mov 0, %l2
151
152 /* prom_root_node = prom_peer(0) */
153 stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "peer"
154 mov 1, %l3
155 stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 1
156 stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1
157 stx %l2, [%sp + 2047 + 128 + 0x18] ! arg1, 0
158 stx %g0, [%sp + 2047 + 128 + 0x20] ! ret1
159 call %l7
160 add %sp, (2047 + 128), %o0 ! argument array
161
162 ldx [%sp + 2047 + 128 + 0x20], %l4 ! prom root node
163 mov (1b - prom_root_node), %l1
164 sub %l0, %l1, %l1
165 stw %l4, [%l1]
166
167 mov (1b - prom_getprop_name), %l1
168 mov (1b - prom_compatible_name), %l2
169 mov (1b - prom_root_compatible), %l5
170 sub %l0, %l1, %l1
171 sub %l0, %l2, %l2
172 sub %l0, %l5, %l5
173
174 /* prom_getproperty(prom_root_node, "compatible",
175 * &prom_root_compatible, 64)
176 */
177 stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "getprop"
178 mov 4, %l3
179 stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 4
180 mov 1, %l3
181 stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1
182 stx %l4, [%sp + 2047 + 128 + 0x18] ! arg1, prom_root_node
183 stx %l2, [%sp + 2047 + 128 + 0x20] ! arg2, "compatible"
184 stx %l5, [%sp + 2047 + 128 + 0x28] ! arg3, &prom_root_compatible
185 mov 64, %l3
186 stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4, size
187 stx %g0, [%sp + 2047 + 128 + 0x38] ! ret1
188 call %l7
189 add %sp, (2047 + 128), %o0 ! argument array
190
133 mov (1b - prom_finddev_name), %l1 191 mov (1b - prom_finddev_name), %l1
134 mov (1b - prom_chosen_path), %l2 192 mov (1b - prom_chosen_path), %l2
135 mov (1b - prom_boot_mapped_pc), %l3 193 mov (1b - prom_boot_mapped_pc), %l3
@@ -238,6 +296,27 @@ prom_boot_mapping_phys_low:
238 add %sp, (192 + 128), %sp 296 add %sp, (192 + 128), %sp
239 297
240sparc64_boot_after_remap: 298sparc64_boot_after_remap:
299 sethi %hi(prom_root_compatible), %g1
300 or %g1, %lo(prom_root_compatible), %g1
301 sethi %hi(prom_sun4v_name), %g7
302 or %g7, %lo(prom_sun4v_name), %g7
303 mov 5, %g3
3041: ldub [%g7], %g2
305 ldub [%g1], %g4
306 cmp %g2, %g4
307 bne,pn %icc, 2f
308 add %g7, 1, %g7
309 subcc %g3, 1, %g3
310 bne,pt %xcc, 1b
311 add %g1, 1, %g1
312
313 sethi %hi(is_sun4v), %g1
314 or %g1, %lo(is_sun4v), %g1
315 mov 1, %g7
316 stw %g7, [%g1]
317
3182:
319 BRANCH_IF_SUN4V(g1, jump_to_sun4u_init)
241 BRANCH_IF_CHEETAH_BASE(g1,g7,cheetah_boot) 320 BRANCH_IF_CHEETAH_BASE(g1,g7,cheetah_boot)
242 BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,cheetah_plus_boot) 321 BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,cheetah_plus_boot)
243 ba,pt %xcc, spitfire_boot 322 ba,pt %xcc, spitfire_boot
@@ -301,20 +380,58 @@ jump_to_sun4u_init:
301 nop 380 nop
302 381
303sun4u_init: 382sun4u_init:
383 BRANCH_IF_SUN4V(g1, sun4v_init)
384
304 /* Set ctx 0 */ 385 /* Set ctx 0 */
305 mov PRIMARY_CONTEXT, %g7 386 mov PRIMARY_CONTEXT, %g7
306 stxa %g0, [%g7] ASI_DMMU 387 stxa %g0, [%g7] ASI_DMMU
307 membar #Sync 388 membar #Sync
308 389
309 mov SECONDARY_CONTEXT, %g7 390 mov SECONDARY_CONTEXT, %g7
310 stxa %g0, [%g7] ASI_DMMU 391 stxa %g0, [%g7] ASI_DMMU
311 membar #Sync 392 membar #Sync
312 393
313 BRANCH_IF_ANY_CHEETAH(g1,g7,cheetah_tlb_fixup) 394 ba,pt %xcc, sun4u_continue
395 nop
396
397sun4v_init:
398 /* Set ctx 0 */
399 mov PRIMARY_CONTEXT, %g7
400 stxa %g0, [%g7] ASI_MMU
401 membar #Sync
402
403 mov SECONDARY_CONTEXT, %g7
404 stxa %g0, [%g7] ASI_MMU
405 membar #Sync
406 ba,pt %xcc, niagara_tlb_fixup
407 nop
408
409sun4u_continue:
410 BRANCH_IF_ANY_CHEETAH(g1, g7, cheetah_tlb_fixup)
314 411
315 ba,pt %xcc, spitfire_tlb_fixup 412 ba,pt %xcc, spitfire_tlb_fixup
316 nop 413 nop
317 414
415niagara_tlb_fixup:
416 mov 3, %g2 /* Set TLB type to hypervisor. */
417 sethi %hi(tlb_type), %g1
418 stw %g2, [%g1 + %lo(tlb_type)]
419
420 /* Patch copy/clear ops. */
421 call niagara_patch_copyops
422 nop
423 call niagara_patch_bzero
424 nop
425 call niagara_patch_pageops
426 nop
427
428 /* Patch TLB/cache ops. */
429 call hypervisor_patch_cachetlbops
430 nop
431
432 ba,pt %xcc, tlb_fixup_done
433 nop
434
318cheetah_tlb_fixup: 435cheetah_tlb_fixup:
319 mov 2, %g2 /* Set TLB type to cheetah+. */ 436 mov 2, %g2 /* Set TLB type to cheetah+. */
320 BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f) 437 BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f)
@@ -411,85 +528,55 @@ setup_trap_table:
411 wrpr %g0, 15, %pil 528 wrpr %g0, 15, %pil
412 529
413 /* Make the firmware call to jump over to the Linux trap table. */ 530 /* Make the firmware call to jump over to the Linux trap table. */
414 call prom_set_trap_table 531 sethi %hi(is_sun4v), %o0
415 sethi %hi(sparc64_ttable_tl0), %o0 532 lduw [%o0 + %lo(is_sun4v)], %o0
533 brz,pt %o0, 1f
534 nop
416 535
417 /* Start using proper page size encodings in ctx register. */ 536 TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
418 sethi %hi(sparc64_kern_pri_context), %g3 537 add %g2, TRAP_PER_CPU_FAULT_INFO, %g2
419 ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2 538 stxa %g2, [%g0] ASI_SCRATCHPAD
420 mov PRIMARY_CONTEXT, %g1
421 stxa %g2, [%g1] ASI_DMMU
422 membar #Sync
423 539
424 /* The Linux trap handlers expect various trap global registers 540 /* Compute physical address:
425 * to be setup with some fixed values. So here we set these
426 * up very carefully. These globals are:
427 *
428 * Alternate Globals (PSTATE_AG):
429 *
430 * %g6 --> current_thread_info()
431 *
432 * MMU Globals (PSTATE_MG):
433 *
434 * %g1 --> TLB_SFSR
435 * %g2 --> ((_PAGE_VALID | _PAGE_SZ4MB |
436 * _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)
437 * ^ 0xfffff80000000000)
438 * (this %g2 value is used for computing the PAGE_OFFSET kernel
439 * TLB entries quickly, the virtual address of the fault XOR'd
440 * with this %g2 value is the PTE to load into the TLB)
441 * %g3 --> VPTE_BASE_CHEETAH or VPTE_BASE_SPITFIRE
442 * 541 *
443 * Interrupt Globals (PSTATE_IG, setup by init_irqwork_curcpu()): 542 * paddr = kern_base + (mmfsa_vaddr - KERNBASE)
444 *
445 * %g6 --> __irq_work[smp_processor_id()]
446 */ 543 */
544 sethi %hi(KERNBASE), %g3
545 sub %g2, %g3, %g2
546 sethi %hi(kern_base), %g3
547 ldx [%g3 + %lo(kern_base)], %g3
548 add %g2, %g3, %o1
447 549
448 rdpr %pstate, %o1 550 call prom_set_trap_table_sun4v
449 mov %g6, %o2 551 sethi %hi(sparc64_ttable_tl0), %o0
450 wrpr %o1, PSTATE_AG, %pstate 552
451 mov %o2, %g6 553 ba,pt %xcc, 2f
452
453#define KERN_HIGHBITS ((_PAGE_VALID|_PAGE_SZ4MB)^0xfffff80000000000)
454#define KERN_LOWBITS (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)
455 wrpr %o1, PSTATE_MG, %pstate
456 mov TSB_REG, %g1
457 stxa %g0, [%g1] ASI_DMMU
458 membar #Sync
459 stxa %g0, [%g1] ASI_IMMU
460 membar #Sync
461 mov TLB_SFSR, %g1
462 sethi %uhi(KERN_HIGHBITS), %g2
463 or %g2, %ulo(KERN_HIGHBITS), %g2
464 sllx %g2, 32, %g2
465 or %g2, KERN_LOWBITS, %g2
466
467 BRANCH_IF_ANY_CHEETAH(g3,g7,8f)
468 ba,pt %xcc, 9f
469 nop 554 nop
470 555
4718: 5561: call prom_set_trap_table
472 sethi %uhi(VPTE_BASE_CHEETAH), %g3 557 sethi %hi(sparc64_ttable_tl0), %o0
473 or %g3, %ulo(VPTE_BASE_CHEETAH), %g3
474 ba,pt %xcc, 2f
475 sllx %g3, 32, %g3
476 558
4779: 559 /* Start using proper page size encodings in ctx register. */
478 sethi %uhi(VPTE_BASE_SPITFIRE), %g3 5602: sethi %hi(sparc64_kern_pri_context), %g3
479 or %g3, %ulo(VPTE_BASE_SPITFIRE), %g3 561 ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2
480 sllx %g3, 32, %g3
481 562
4822: 563 mov PRIMARY_CONTEXT, %g1
483 clr %g7 564
484#undef KERN_HIGHBITS 565661: stxa %g2, [%g1] ASI_DMMU
485#undef KERN_LOWBITS 566 .section .sun4v_1insn_patch, "ax"
567 .word 661b
568 stxa %g2, [%g1] ASI_MMU
569 .previous
570
571 membar #Sync
486 572
487 /* Kill PROM timer */ 573 /* Kill PROM timer */
488 sethi %hi(0x80000000), %o2 574 sethi %hi(0x80000000), %o2
489 sllx %o2, 32, %o2 575 sllx %o2, 32, %o2
490 wr %o2, 0, %tick_cmpr 576 wr %o2, 0, %tick_cmpr
491 577
492 BRANCH_IF_ANY_CHEETAH(o2,o3,1f) 578 BRANCH_IF_SUN4V(o2, 1f)
579 BRANCH_IF_ANY_CHEETAH(o2, o3, 1f)
493 580
494 ba,pt %xcc, 2f 581 ba,pt %xcc, 2f
495 nop 582 nop
@@ -502,7 +589,6 @@ setup_trap_table:
502 589
5032: 5902:
504 wrpr %g0, %g0, %wstate 591 wrpr %g0, %g0, %wstate
505 wrpr %o1, 0x0, %pstate
506 592
507 call init_irqwork_curcpu 593 call init_irqwork_curcpu
508 nop 594 nop
@@ -517,7 +603,7 @@ setup_trap_table:
517 restore 603 restore
518 604
519 .globl setup_tba 605 .globl setup_tba
520setup_tba: /* i0 = is_starfire */ 606setup_tba:
521 save %sp, -192, %sp 607 save %sp, -192, %sp
522 608
523 /* The boot processor is the only cpu which invokes this 609 /* The boot processor is the only cpu which invokes this
@@ -536,31 +622,35 @@ setup_tba: /* i0 = is_starfire */
536 restore 622 restore
537sparc64_boot_end: 623sparc64_boot_end:
538 624
539#include "systbls.S"
540#include "ktlb.S" 625#include "ktlb.S"
626#include "tsb.S"
541#include "etrap.S" 627#include "etrap.S"
542#include "rtrap.S" 628#include "rtrap.S"
543#include "winfixup.S" 629#include "winfixup.S"
544#include "entry.S" 630#include "entry.S"
631#include "sun4v_tlb_miss.S"
632#include "sun4v_ivec.S"
545 633
546/* 634/*
547 * The following skip makes sure the trap table in ttable.S is aligned 635 * The following skip makes sure the trap table in ttable.S is aligned
548 * on a 32K boundary as required by the v9 specs for TBA register. 636 * on a 32K boundary as required by the v9 specs for TBA register.
637 *
638 * We align to a 32K boundary, then we have the 32K kernel TSB,
639 * then the 32K aligned trap table.
549 */ 640 */
5501: 6411:
551 .skip 0x4000 + _start - 1b 642 .skip 0x4000 + _start - 1b
552 643
553#ifdef CONFIG_SBUS 644 .globl swapper_tsb
554/* This is just a hack to fool make depend config.h discovering 645swapper_tsb:
555 strategy: As the .S files below need config.h, but 646 .skip (32 * 1024)
556 make depend does not find it for them, we include config.h
557 in head.S */
558#endif
559 647
560! 0x0000000000408000 648! 0x0000000000408000
561 649
562#include "ttable.S" 650#include "ttable.S"
563 651
652#include "systbls.S"
653
564 .data 654 .data
565 .align 8 655 .align 8
566 .globl prom_tba, tlb_type 656 .globl prom_tba, tlb_type
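The added boot code asks the PROM for the root node's "compatible" property and compares its first five bytes against "sun4v" to decide whether to set is_sun4v, which the BRANCH_IF_SUN4V checks added elsewhere in this file rely on. In C the same check is a bounded string compare; the sketch below is illustrative only, with the two variables standing in for the assembly-level buffers of the same names:

#include <string.h>

static char prom_root_compatible[64];	/* filled by the "getprop" PROM call above */
static int  is_sun4v;

static void detect_sun4v(void)
{
	/* Mirrors the five-byte compare loop in sparc64_boot_after_remap. */
	if (strncmp(prom_root_compatible, "sun4v", 5) == 0)
		is_sun4v = 1;
}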
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 233526ba3abe..8c93ba655b33 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -21,6 +21,7 @@
21#include <linux/delay.h> 21#include <linux/delay.h>
22#include <linux/proc_fs.h> 22#include <linux/proc_fs.h>
23#include <linux/seq_file.h> 23#include <linux/seq_file.h>
24#include <linux/bootmem.h>
24 25
25#include <asm/ptrace.h> 26#include <asm/ptrace.h>
26#include <asm/processor.h> 27#include <asm/processor.h>
@@ -39,6 +40,7 @@
39#include <asm/cache.h> 40#include <asm/cache.h>
40#include <asm/cpudata.h> 41#include <asm/cpudata.h>
41#include <asm/auxio.h> 42#include <asm/auxio.h>
43#include <asm/head.h>
42 44
43#ifdef CONFIG_SMP 45#ifdef CONFIG_SMP
44static void distribute_irqs(void); 46static void distribute_irqs(void);
@@ -136,12 +138,48 @@ out_unlock:
136 return 0; 138 return 0;
137} 139}
138 140
141extern unsigned long real_hard_smp_processor_id(void);
142
143static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
144{
145 unsigned int tid;
146
147 if (this_is_starfire) {
148 tid = starfire_translate(imap, cpuid);
149 tid <<= IMAP_TID_SHIFT;
150 tid &= IMAP_TID_UPA;
151 } else {
152 if (tlb_type == cheetah || tlb_type == cheetah_plus) {
153 unsigned long ver;
154
155 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
156 if ((ver >> 32UL) == __JALAPENO_ID ||
157 (ver >> 32UL) == __SERRANO_ID) {
158 tid = cpuid << IMAP_TID_SHIFT;
159 tid &= IMAP_TID_JBUS;
160 } else {
161 unsigned int a = cpuid & 0x1f;
162 unsigned int n = (cpuid >> 5) & 0x1f;
163
164 tid = ((a << IMAP_AID_SHIFT) |
165 (n << IMAP_NID_SHIFT));
166 tid &= (IMAP_AID_SAFARI |
 167 IMAP_NID_SAFARI);
168 }
169 } else {
170 tid = cpuid << IMAP_TID_SHIFT;
171 tid &= IMAP_TID_UPA;
172 }
173 }
174
175 return tid;
176}
177
139/* Now these are always passed a true fully specified sun4u INO. */ 178/* Now these are always passed a true fully specified sun4u INO. */
140void enable_irq(unsigned int irq) 179void enable_irq(unsigned int irq)
141{ 180{
142 struct ino_bucket *bucket = __bucket(irq); 181 struct ino_bucket *bucket = __bucket(irq);
143 unsigned long imap; 182 unsigned long imap, cpuid;
144 unsigned long tid;
145 183
146 imap = bucket->imap; 184 imap = bucket->imap;
147 if (imap == 0UL) 185 if (imap == 0UL)
@@ -149,47 +187,38 @@ void enable_irq(unsigned int irq)
149 187
150 preempt_disable(); 188 preempt_disable();
151 189
152 if (tlb_type == cheetah || tlb_type == cheetah_plus) { 190 /* This gets the physical processor ID, even on uniprocessor,
153 unsigned long ver; 191 * so we can always program the interrupt target correctly.
154 192 */
155 __asm__ ("rdpr %%ver, %0" : "=r" (ver)); 193 cpuid = real_hard_smp_processor_id();
156 if ((ver >> 32) == 0x003e0016) { 194
157 /* We set it to our JBUS ID. */ 195 if (tlb_type == hypervisor) {
158 __asm__ __volatile__("ldxa [%%g0] %1, %0" 196 unsigned int ino = __irq_ino(irq);
159 : "=r" (tid) 197 int err;
160 : "i" (ASI_JBUS_CONFIG)); 198
161 tid = ((tid & (0x1fUL<<17)) << 9); 199 err = sun4v_intr_settarget(ino, cpuid);
162 tid &= IMAP_TID_JBUS; 200 if (err != HV_EOK)
163 } else { 201 printk("sun4v_intr_settarget(%x,%lu): err(%d)\n",
164 /* We set it to our Safari AID. */ 202 ino, cpuid, err);
165 __asm__ __volatile__("ldxa [%%g0] %1, %0" 203 err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
166 : "=r" (tid) 204 if (err != HV_EOK)
167 : "i" (ASI_SAFARI_CONFIG)); 205 printk("sun4v_intr_setenabled(%x): err(%d)\n",
168 tid = ((tid & (0x3ffUL<<17)) << 9); 206 ino, err);
169 tid &= IMAP_AID_SAFARI;
170 }
171 } else if (this_is_starfire == 0) {
172 /* We set it to our UPA MID. */
173 __asm__ __volatile__("ldxa [%%g0] %1, %0"
174 : "=r" (tid)
175 : "i" (ASI_UPA_CONFIG));
176 tid = ((tid & UPA_CONFIG_MID) << 9);
177 tid &= IMAP_TID_UPA;
178 } else { 207 } else {
179 tid = (starfire_translate(imap, smp_processor_id()) << 26); 208 unsigned int tid = sun4u_compute_tid(imap, cpuid);
180 tid &= IMAP_TID_UPA; 209
210 /* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product
211 * of this SYSIO's preconfigured IGN in the SYSIO Control
212 * Register, the hardware just mirrors that value here.
213 * However for Graphics and UPA Slave devices the full
214 * IMAP_INR field can be set by the programmer here.
215 *
216 * Things like FFB can now be handled via the new IRQ
217 * mechanism.
218 */
219 upa_writel(tid | IMAP_VALID, imap);
181 } 220 }
182 221
183 /* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product
184 * of this SYSIO's preconfigured IGN in the SYSIO Control
185 * Register, the hardware just mirrors that value here.
186 * However for Graphics and UPA Slave devices the full
187 * IMAP_INR field can be set by the programmer here.
188 *
189 * Things like FFB can now be handled via the new IRQ mechanism.
190 */
191 upa_writel(tid | IMAP_VALID, imap);
192
193 preempt_enable(); 222 preempt_enable();
194} 223}
195 224
@@ -201,16 +230,26 @@ void disable_irq(unsigned int irq)
201 230
202 imap = bucket->imap; 231 imap = bucket->imap;
203 if (imap != 0UL) { 232 if (imap != 0UL) {
204 u32 tmp; 233 if (tlb_type == hypervisor) {
234 unsigned int ino = __irq_ino(irq);
235 int err;
236
237 err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
238 if (err != HV_EOK)
239 printk("sun4v_intr_setenabled(%x): "
240 "err(%d)\n", ino, err);
241 } else {
242 u32 tmp;
205 243
206 /* NOTE: We do not want to futz with the IRQ clear registers 244 /* NOTE: We do not want to futz with the IRQ clear registers
207 * and move the state to IDLE, the SCSI code does call 245 * and move the state to IDLE, the SCSI code does call
208 * disable_irq() to assure atomicity in the queue cmd 246 * disable_irq() to assure atomicity in the queue cmd
209 * SCSI adapter driver code. Thus we'd lose interrupts. 247 * SCSI adapter driver code. Thus we'd lose interrupts.
210 */ 248 */
211 tmp = upa_readl(imap); 249 tmp = upa_readl(imap);
212 tmp &= ~IMAP_VALID; 250 tmp &= ~IMAP_VALID;
213 upa_writel(tmp, imap); 251 upa_writel(tmp, imap);
252 }
214 } 253 }
215} 254}
216 255
@@ -248,6 +287,8 @@ unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long
248 return __irq(&pil0_dummy_bucket); 287 return __irq(&pil0_dummy_bucket);
249 } 288 }
250 289
290 BUG_ON(tlb_type == hypervisor);
291
251 /* RULE: Both must be specified in all other cases. */ 292 /* RULE: Both must be specified in all other cases. */
252 if (iclr == 0UL || imap == 0UL) { 293 if (iclr == 0UL || imap == 0UL) {
253 prom_printf("Invalid build_irq %d %d %016lx %016lx\n", 294 prom_printf("Invalid build_irq %d %d %016lx %016lx\n",
@@ -275,12 +316,11 @@ unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long
275 goto out; 316 goto out;
276 } 317 }
277 318
278 bucket->irq_info = kmalloc(sizeof(struct irq_desc), GFP_ATOMIC); 319 bucket->irq_info = kzalloc(sizeof(struct irq_desc), GFP_ATOMIC);
279 if (!bucket->irq_info) { 320 if (!bucket->irq_info) {
280 prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n"); 321 prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n");
281 prom_halt(); 322 prom_halt();
282 } 323 }
283 memset(bucket->irq_info, 0, sizeof(struct irq_desc));
284 324
285 /* Ok, looks good, set it up. Don't touch the irq_chain or 325 /* Ok, looks good, set it up. Don't touch the irq_chain or
286 * the pending flag. 326 * the pending flag.
@@ -294,6 +334,37 @@ out:
294 return __irq(bucket); 334 return __irq(bucket);
295} 335}
296 336
337unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, int pil, unsigned char flags)
338{
339 struct ino_bucket *bucket;
340 unsigned long sysino;
341
342 sysino = sun4v_devino_to_sysino(devhandle, devino);
343
344 bucket = &ivector_table[sysino];
345
346 /* Catch accidental accesses to these things. IMAP/ICLR handling
347 * is done by hypervisor calls on sun4v platforms, not by direct
348 * register accesses.
349 *
350 * But we need to make them look unique for the disable_irq() logic
351 * in free_irq().
352 */
353 bucket->imap = ~0UL - sysino;
354 bucket->iclr = ~0UL - sysino;
355
356 bucket->pil = pil;
357 bucket->flags = flags;
358
359 bucket->irq_info = kzalloc(sizeof(struct irq_desc), GFP_ATOMIC);
360 if (!bucket->irq_info) {
361 prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n");
362 prom_halt();
363 }
364
365 return __irq(bucket);
366}
367
297static void atomic_bucket_insert(struct ino_bucket *bucket) 368static void atomic_bucket_insert(struct ino_bucket *bucket)
298{ 369{
299 unsigned long pstate; 370 unsigned long pstate;
@@ -482,7 +553,6 @@ void free_irq(unsigned int irq, void *dev_id)
482 bucket = __bucket(irq); 553 bucket = __bucket(irq);
483 if (bucket != &pil0_dummy_bucket) { 554 if (bucket != &pil0_dummy_bucket) {
484 struct irq_desc *desc = bucket->irq_info; 555 struct irq_desc *desc = bucket->irq_info;
485 unsigned long imap = bucket->imap;
486 int ent, i; 556 int ent, i;
487 557
488 for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) { 558 for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
@@ -495,6 +565,8 @@ void free_irq(unsigned int irq, void *dev_id)
495 } 565 }
496 566
497 if (!desc->action_active_mask) { 567 if (!desc->action_active_mask) {
568 unsigned long imap = bucket->imap;
569
498 /* This unique interrupt source is now inactive. */ 570 /* This unique interrupt source is now inactive. */
499 bucket->flags &= ~IBF_ACTIVE; 571 bucket->flags &= ~IBF_ACTIVE;
500 572
@@ -592,7 +664,18 @@ static void process_bucket(int irq, struct ino_bucket *bp, struct pt_regs *regs)
592 break; 664 break;
593 } 665 }
594 if (bp->pil != 0) { 666 if (bp->pil != 0) {
595 upa_writel(ICLR_IDLE, bp->iclr); 667 if (tlb_type == hypervisor) {
668 unsigned int ino = __irq_ino(bp);
669 int err;
670
671 err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
672 if (err != HV_EOK)
673 printk("sun4v_intr_setstate(%x): "
674 "err(%d)\n", ino, err);
675 } else {
676 upa_writel(ICLR_IDLE, bp->iclr);
677 }
678
596 /* Test and add entropy */ 679 /* Test and add entropy */
597 if (random & SA_SAMPLE_RANDOM) 680 if (random & SA_SAMPLE_RANDOM)
598 add_interrupt_randomness(irq); 681 add_interrupt_randomness(irq);
@@ -694,7 +777,7 @@ irqreturn_t sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
694 val = readb(auxio_register); 777 val = readb(auxio_register);
695 val |= AUXIO_AUX1_FTCNT; 778 val |= AUXIO_AUX1_FTCNT;
696 writeb(val, auxio_register); 779 writeb(val, auxio_register);
697 val &= AUXIO_AUX1_FTCNT; 780 val &= ~AUXIO_AUX1_FTCNT;
698 writeb(val, auxio_register); 781 writeb(val, auxio_register);
699 782
700 doing_pdma = 0; 783 doing_pdma = 0;
@@ -727,25 +810,23 @@ EXPORT_SYMBOL(probe_irq_off);
727static int retarget_one_irq(struct irqaction *p, int goal_cpu) 810static int retarget_one_irq(struct irqaction *p, int goal_cpu)
728{ 811{
729 struct ino_bucket *bucket = get_ino_in_irqaction(p) + ivector_table; 812 struct ino_bucket *bucket = get_ino_in_irqaction(p) + ivector_table;
730 unsigned long imap = bucket->imap;
731 unsigned int tid;
732 813
733 while (!cpu_online(goal_cpu)) { 814 while (!cpu_online(goal_cpu)) {
734 if (++goal_cpu >= NR_CPUS) 815 if (++goal_cpu >= NR_CPUS)
735 goal_cpu = 0; 816 goal_cpu = 0;
736 } 817 }
737 818
738 if (tlb_type == cheetah || tlb_type == cheetah_plus) { 819 if (tlb_type == hypervisor) {
739 tid = goal_cpu << 26; 820 unsigned int ino = __irq_ino(bucket);
740 tid &= IMAP_AID_SAFARI; 821
741 } else if (this_is_starfire == 0) { 822 sun4v_intr_settarget(ino, goal_cpu);
742 tid = goal_cpu << 26; 823 sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
743 tid &= IMAP_TID_UPA;
744 } else { 824 } else {
745 tid = (starfire_translate(imap, goal_cpu) << 26); 825 unsigned long imap = bucket->imap;
746 tid &= IMAP_TID_UPA; 826 unsigned int tid = sun4u_compute_tid(imap, goal_cpu);
827
828 upa_writel(tid | IMAP_VALID, imap);
747 } 829 }
748 upa_writel(tid | IMAP_VALID, imap);
749 830
750 do { 831 do {
751 if (++goal_cpu >= NR_CPUS) 832 if (++goal_cpu >= NR_CPUS)
@@ -848,33 +929,114 @@ static void kill_prom_timer(void)
848 929
849void init_irqwork_curcpu(void) 930void init_irqwork_curcpu(void)
850{ 931{
851 register struct irq_work_struct *workp asm("o2");
852 register unsigned long tmp asm("o3");
853 int cpu = hard_smp_processor_id(); 932 int cpu = hard_smp_processor_id();
854 933
855 memset(__irq_work + cpu, 0, sizeof(*workp)); 934 memset(__irq_work + cpu, 0, sizeof(struct irq_work_struct));
856 935}
857 /* Make sure we are called with PSTATE_IE disabled. */ 936
858 __asm__ __volatile__("rdpr %%pstate, %0\n\t" 937static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type)
859 : "=r" (tmp)); 938{
860 if (tmp & PSTATE_IE) { 939 unsigned long num_entries = 128;
861 prom_printf("BUG: init_irqwork_curcpu() called with " 940 unsigned long status;
862 "PSTATE_IE enabled, bailing.\n"); 941
863 __asm__ __volatile__("mov %%i7, %0\n\t" 942 status = sun4v_cpu_qconf(type, paddr, num_entries);
864 : "=r" (tmp)); 943 if (status != HV_EOK) {
865 prom_printf("BUG: Called from %lx\n", tmp); 944 prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
945 "err %lu\n", type, paddr, num_entries, status);
866 prom_halt(); 946 prom_halt();
867 } 947 }
948}
868 949
869 /* Set interrupt globals. */ 950static void __cpuinit sun4v_register_mondo_queues(int this_cpu)
870 workp = &__irq_work[cpu]; 951{
871 __asm__ __volatile__( 952 struct trap_per_cpu *tb = &trap_block[this_cpu];
872 "rdpr %%pstate, %0\n\t" 953
873 "wrpr %0, %1, %%pstate\n\t" 954 register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO);
874 "mov %2, %%g6\n\t" 955 register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO);
875 "wrpr %0, 0x0, %%pstate\n\t" 956 register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR);
876 : "=&r" (tmp) 957 register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR);
877 : "i" (PSTATE_IG), "r" (workp)); 958}
959
960static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, int use_bootmem)
961{
962 void *page;
963
964 if (use_bootmem)
965 page = alloc_bootmem_low_pages(PAGE_SIZE);
966 else
967 page = (void *) get_zeroed_page(GFP_ATOMIC);
968
969 if (!page) {
970 prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
971 prom_halt();
972 }
973
974 *pa_ptr = __pa(page);
975}
976
977static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, int use_bootmem)
978{
979 void *page;
980
981 if (use_bootmem)
982 page = alloc_bootmem_low_pages(PAGE_SIZE);
983 else
984 page = (void *) get_zeroed_page(GFP_ATOMIC);
985
986 if (!page) {
987 prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
988 prom_halt();
989 }
990
991 *pa_ptr = __pa(page);
992}
993
994static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_bootmem)
995{
996#ifdef CONFIG_SMP
997 void *page;
998
999 BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
1000
1001 if (use_bootmem)
1002 page = alloc_bootmem_low_pages(PAGE_SIZE);
1003 else
1004 page = (void *) get_zeroed_page(GFP_ATOMIC);
1005
1006 if (!page) {
1007 prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
1008 prom_halt();
1009 }
1010
1011 tb->cpu_mondo_block_pa = __pa(page);
1012 tb->cpu_list_pa = __pa(page + 64);
1013#endif
1014}
1015
1016/* Allocate and register the mondo and error queues for this cpu. */
1017void __cpuinit sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load)
1018{
1019 struct trap_per_cpu *tb = &trap_block[cpu];
1020
1021 if (alloc) {
1022 alloc_one_mondo(&tb->cpu_mondo_pa, use_bootmem);
1023 alloc_one_mondo(&tb->dev_mondo_pa, use_bootmem);
1024 alloc_one_mondo(&tb->resum_mondo_pa, use_bootmem);
1025 alloc_one_kbuf(&tb->resum_kernel_buf_pa, use_bootmem);
1026 alloc_one_mondo(&tb->nonresum_mondo_pa, use_bootmem);
1027 alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, use_bootmem);
1028
1029 init_cpu_send_mondo_info(tb, use_bootmem);
1030 }
1031
1032 if (load) {
1033 if (cpu != hard_smp_processor_id()) {
1034 prom_printf("SUN4V: init mondo on cpu %d not %d\n",
1035 cpu, hard_smp_processor_id());
1036 prom_halt();
1037 }
1038 sun4v_register_mondo_queues(cpu);
1039 }
878} 1040}
879 1041
880/* Only invoked on boot processor. */ 1042/* Only invoked on boot processor. */
@@ -884,6 +1046,9 @@ void __init init_IRQ(void)
884 kill_prom_timer(); 1046 kill_prom_timer();
885 memset(&ivector_table[0], 0, sizeof(ivector_table)); 1047 memset(&ivector_table[0], 0, sizeof(ivector_table));
886 1048
1049 if (tlb_type == hypervisor)
1050 sun4v_init_mondo_queues(1, hard_smp_processor_id(), 1, 1);
1051
887 /* We need to clear any IRQ's pending in the soft interrupt 1052 /* We need to clear any IRQ's pending in the soft interrupt
888 * registers, a spurious one could be left around from the 1053 * registers, a spurious one could be left around from the
889 * PROM timer which we just disabled. 1054 * PROM timer which we just disabled.
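sun4u_compute_tid() centralizes the interrupt-target encoding that enable_irq() and retarget_one_irq() previously each open-coded. As a worked example of its Safari (non-JBUS Cheetah) branch: for cpuid 0x25 the agent id is a = 0x25 & 0x1f = 5, the node id is n = (0x25 >> 5) & 0x1f = 1, and the tid is (a << IMAP_AID_SHIFT) | (n << IMAP_NID_SHIFT) masked to the Safari AID/NID fields. A standalone version of just that branch; the shift and mask values are assumptions, since the real IMAP_* constants live in the sparc64 headers:

#include <stdio.h>

/* Assumed values for illustration; the kernel's definitions may differ. */
#define IMAP_AID_SHIFT	26
#define IMAP_NID_SHIFT	21
#define IMAP_AID_SAFARI	(0x1fUL << IMAP_AID_SHIFT)
#define IMAP_NID_SAFARI	(0x1fUL << IMAP_NID_SHIFT)

static unsigned int safari_tid(unsigned long cpuid)
{
	unsigned int a = cpuid & 0x1f;
	unsigned int n = (cpuid >> 5) & 0x1f;

	return ((a << IMAP_AID_SHIFT) | (n << IMAP_NID_SHIFT)) &
	       (IMAP_AID_SAFARI | IMAP_NID_SAFARI);
}

int main(void)
{
	printf("tid for cpuid 0x25 = 0x%x\n", safari_tid(0x25));
	return 0;
}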
diff --git a/arch/sparc64/kernel/itlb_base.S b/arch/sparc64/kernel/itlb_base.S
deleted file mode 100644
index 4951ff8f6877..000000000000
--- a/arch/sparc64/kernel/itlb_base.S
+++ /dev/null
@@ -1,79 +0,0 @@
1/* $Id: itlb_base.S,v 1.12 2002/02/09 19:49:30 davem Exp $
2 * itlb_base.S: Front end to ITLB miss replacement strategy.
3 * This is included directly into the trap table.
4 *
5 * Copyright (C) 1996,1998 David S. Miller (davem@redhat.com)
6 * Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz)
7 */
8
9#if PAGE_SHIFT == 13
10/*
11 * To compute vpte offset, we need to do ((addr >> 13) << 3),
12 * which can be optimized to (addr >> 10) if bits 10/11/12 can
13 * be guaranteed to be 0 ... mmu_context.h does guarantee this
14 * by only using 10 bits in the hwcontext value.
15 */
16#define CREATE_VPTE_OFFSET1(r1, r2) \
17 srax r1, 10, r2
18#define CREATE_VPTE_OFFSET2(r1, r2) nop
19#else /* PAGE_SHIFT */
20#define CREATE_VPTE_OFFSET1(r1, r2) \
21 srax r1, PAGE_SHIFT, r2
22#define CREATE_VPTE_OFFSET2(r1, r2) \
23 sllx r2, 3, r2
24#endif /* PAGE_SHIFT */
25
26
27/* Ways we can get here:
28 *
29 * 1) Nucleus instruction misses from module code.
30 * 2) All user instruction misses.
31 *
32 * All real page faults merge their code paths to the
33 * sparc64_realfault_common label below.
34 */
35
36/* ITLB ** ICACHE line 1: Quick user TLB misses */
37 mov TLB_SFSR, %g1
38 ldxa [%g1 + %g1] ASI_IMMU, %g4 ! Get TAG_ACCESS
39 CREATE_VPTE_OFFSET1(%g4, %g6) ! Create VPTE offset
40 CREATE_VPTE_OFFSET2(%g4, %g6) ! Create VPTE offset
41 ldxa [%g3 + %g6] ASI_P, %g5 ! Load VPTE
421: brgez,pn %g5, 3f ! Not valid, branch out
43 sethi %hi(_PAGE_EXEC), %g4 ! Delay-slot
44 andcc %g5, %g4, %g0 ! Executable?
45
46/* ITLB ** ICACHE line 2: Real faults */
47 be,pn %xcc, 3f ! Nope, branch.
48 nop ! Delay-slot
492: stxa %g5, [%g0] ASI_ITLB_DATA_IN ! Load PTE into TLB
50 retry ! Trap return
513: rdpr %pstate, %g4 ! Move into alt-globals
52 wrpr %g4, PSTATE_AG|PSTATE_MG, %pstate
53 rdpr %tpc, %g5 ! And load faulting VA
54 mov FAULT_CODE_ITLB, %g4 ! It was read from ITLB
55
56/* ITLB ** ICACHE line 3: Finish faults */
57sparc64_realfault_common: ! Called by dtlb_miss
58 stb %g4, [%g6 + TI_FAULT_CODE]
59 stx %g5, [%g6 + TI_FAULT_ADDR]
60 ba,pt %xcc, etrap ! Save state
611: rd %pc, %g7 ! ...
62 call do_sparc64_fault ! Call fault handler
63 add %sp, PTREGS_OFF, %o0! Compute pt_regs arg
64 ba,pt %xcc, rtrap_clr_l6 ! Restore cpu state
65 nop
66
67/* ITLB ** ICACHE line 4: Window fixups */
68winfix_trampoline:
69 rdpr %tpc, %g3 ! Prepare winfixup TNPC
70 or %g3, 0x7c, %g3 ! Compute branch offset
71 wrpr %g3, %tnpc ! Write it into TNPC
72 done ! Do it to it
73 nop
74 nop
75 nop
76 nop
77
78#undef CREATE_VPTE_OFFSET1
79#undef CREATE_VPTE_OFFSET2
diff --git a/arch/sparc64/kernel/itlb_miss.S b/arch/sparc64/kernel/itlb_miss.S
new file mode 100644
index 000000000000..ad46e2024f4b
--- /dev/null
+++ b/arch/sparc64/kernel/itlb_miss.S
@@ -0,0 +1,39 @@
1/* ITLB ** ICACHE line 1: Context 0 check and TSB load */
2 ldxa [%g0] ASI_IMMU_TSB_8KB_PTR, %g1 ! Get TSB 8K pointer
3 ldxa [%g0] ASI_IMMU, %g6 ! Get TAG TARGET
4 srlx %g6, 48, %g5 ! Get context
5 sllx %g6, 22, %g6 ! Zero out context
6 brz,pn %g5, kvmap_itlb ! Context 0 processing
7 srlx %g6, 22, %g6 ! Delay slot
8 TSB_LOAD_QUAD(%g1, %g4) ! Load TSB entry
9 cmp %g4, %g6 ! Compare TAG
10
11/* ITLB ** ICACHE line 2: TSB compare and TLB load */
12 bne,pn %xcc, tsb_miss_itlb ! Miss
13 mov FAULT_CODE_ITLB, %g3
14 andcc %g5, _PAGE_EXEC_4U, %g0 ! Executable?
15 be,pn %xcc, tsb_do_fault
16 nop ! Delay slot, fill me
17 stxa %g5, [%g0] ASI_ITLB_DATA_IN ! Load TLB
18 retry ! Trap done
19 nop
20
21/* ITLB ** ICACHE line 3: */
22 nop
23 nop
24 nop
25 nop
26 nop
27 nop
28 nop
29 nop
30
31/* ITLB ** ICACHE line 4: */
32 nop
33 nop
34 nop
35 nop
36 nop
37 nop
38 nop
39 nop
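The two active icache lines above carry the whole sun4u ITLB fast path: split the MMU TAG TARGET into context and tag, punt context 0 (kernel/OBP addresses) to kvmap_itlb, otherwise probe the 8K TSB, check _PAGE_EXEC_4U, and reload the TLB. A standalone C sketch of the tag-target split implied by the shift pair (field boundaries are read off the shift counts; the helper name is illustrative, not a kernel symbol):

#include <stdint.h>

/* Illustrative only: mirror of "srlx %g6, 48, %g5" and the
 * "sllx %g6, 22" / "srlx %g6, 22" pair in itlb_miss.S above.
 */
static inline void split_tag_target(uint64_t tag_target,
                                    uint64_t *context, uint64_t *tag)
{
        *context = tag_target >> 48;            /* context lives above bit 47 */
        *tag     = (tag_target << 22) >> 22;    /* clear context bits, keep VA[63:22] */
}

A context value of zero is what routes the miss to kvmap_itlb for kernel-address handling.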
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
index d9244d3c9f73..31da1e564c95 100644
--- a/arch/sparc64/kernel/ktlb.S
+++ b/arch/sparc64/kernel/ktlb.S
@@ -4,191 +4,276 @@
4 * Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de) 4 * Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de)
5 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx) 5 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
6 * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz) 6 * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7*/ 7 */
8 8
9#include <linux/config.h> 9#include <linux/config.h>
10#include <asm/head.h> 10#include <asm/head.h>
11#include <asm/asi.h> 11#include <asm/asi.h>
12#include <asm/page.h> 12#include <asm/page.h>
13#include <asm/pgtable.h> 13#include <asm/pgtable.h>
14#include <asm/tsb.h>
14 15
15 .text 16 .text
16 .align 32 17 .align 32
17 18
18/* 19kvmap_itlb:
19 * On a second level vpte miss, check whether the original fault is to the OBP 20 /* g6: TAG TARGET */
20 * range (note that this is only possible for instruction miss, data misses to 21 mov TLB_TAG_ACCESS, %g4
21 * obp range do not use vpte). If so, go back directly to the faulting address. 22 ldxa [%g4] ASI_IMMU, %g4
22 * This is because we want to read the tpc, otherwise we have no way of knowing 23
23 * the 8k aligned faulting address if we are using >8k kernel pagesize. This 24 /* sun4v_itlb_miss branches here with the missing virtual
24 * also ensures no vpte range addresses are dropped into tlb while obp is 25 * address already loaded into %g4
25 * executing (see inherit_locked_prom_mappings() rant).
26 */
27sparc64_vpte_nucleus:
28 /* Note that kvmap below has verified that the address is
29 * in the range MODULES_VADDR --> VMALLOC_END already. So
30 * here we need only check if it is an OBP address or not.
31 */ 26 */
27kvmap_itlb_4v:
28
29kvmap_itlb_nonlinear:
30 /* Catch kernel NULL pointer calls. */
31 sethi %hi(PAGE_SIZE), %g5
32 cmp %g4, %g5
33 bleu,pn %xcc, kvmap_dtlb_longpath
34 nop
35
36 KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)
37
38kvmap_itlb_tsb_miss:
32 sethi %hi(LOW_OBP_ADDRESS), %g5 39 sethi %hi(LOW_OBP_ADDRESS), %g5
33 cmp %g4, %g5 40 cmp %g4, %g5
34 blu,pn %xcc, kern_vpte 41 blu,pn %xcc, kvmap_itlb_vmalloc_addr
35 mov 0x1, %g5 42 mov 0x1, %g5
36 sllx %g5, 32, %g5 43 sllx %g5, 32, %g5
37 cmp %g4, %g5 44 cmp %g4, %g5
38 blu,pn %xcc, vpte_insn_obp 45 blu,pn %xcc, kvmap_itlb_obp
39 nop 46 nop
40 47
41 /* These two instructions are patched by paging_init(). */ 48kvmap_itlb_vmalloc_addr:
42kern_vpte: 49 KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)
43 sethi %hi(swapper_pgd_zero), %g5
44 lduw [%g5 + %lo(swapper_pgd_zero)], %g5
45 50
46 /* With kernel PGD in %g5, branch back into dtlb_backend. */ 51 KTSB_LOCK_TAG(%g1, %g2, %g7)
47 ba,pt %xcc, sparc64_kpte_continue
48 andn %g1, 0x3, %g1 /* Finish PMD offset adjustment. */
49 52
50vpte_noent: 53 /* Load and check PTE. */
51 /* Restore previous TAG_ACCESS, %g5 is zero, and we will 54 ldxa [%g5] ASI_PHYS_USE_EC, %g5
52 * skip over the trap instruction so that the top level 55 mov 1, %g7
53 * TLB miss handler will think this %g5 value is just an 56 sllx %g7, TSB_TAG_INVALID_BIT, %g7
54 * invalid PTE, thus branching to full fault processing. 57 brgez,a,pn %g5, kvmap_itlb_longpath
55 */ 58 KTSB_STORE(%g1, %g7)
56 mov TLB_SFSR, %g1 59
57 stxa %g4, [%g1 + %g1] ASI_DMMU 60 KTSB_WRITE(%g1, %g5, %g6)
58 done 61
59 62 /* fallthrough to TLB load */
60vpte_insn_obp:
61 /* Behave as if we are at TL0. */
62 wrpr %g0, 1, %tl
63 rdpr %tpc, %g4 /* Find original faulting iaddr */
64 srlx %g4, 13, %g4 /* Throw out context bits */
65 sllx %g4, 13, %g4 /* g4 has vpn + ctx0 now */
66
67 /* Restore previous TAG_ACCESS. */
68 mov TLB_SFSR, %g1
69 stxa %g4, [%g1 + %g1] ASI_IMMU
70
71 sethi %hi(prom_trans), %g5
72 or %g5, %lo(prom_trans), %g5
73
741: ldx [%g5 + 0x00], %g6 ! base
75 brz,a,pn %g6, longpath ! no more entries, fail
76 mov TLB_SFSR, %g1 ! and restore %g1
77 ldx [%g5 + 0x08], %g1 ! len
78 add %g6, %g1, %g1 ! end
79 cmp %g6, %g4
80 bgu,pt %xcc, 2f
81 cmp %g4, %g1
82 bgeu,pt %xcc, 2f
83 ldx [%g5 + 0x10], %g1 ! PTE
84
85 /* TLB load, restore %g1, and return from trap. */
86 sub %g4, %g6, %g6
87 add %g1, %g6, %g5
88 mov TLB_SFSR, %g1
89 stxa %g5, [%g0] ASI_ITLB_DATA_IN
90 retry
91 63
922: ba,pt %xcc, 1b 64kvmap_itlb_load:
93 add %g5, (3 * 8), %g5 ! next entry 65
94 66661: stxa %g5, [%g0] ASI_ITLB_DATA_IN
95kvmap_do_obp:
96 sethi %hi(prom_trans), %g5
97 or %g5, %lo(prom_trans), %g5
98 srlx %g4, 13, %g4
99 sllx %g4, 13, %g4
100
1011: ldx [%g5 + 0x00], %g6 ! base
102 brz,a,pn %g6, longpath ! no more entries, fail
103 mov TLB_SFSR, %g1 ! and restore %g1
104 ldx [%g5 + 0x08], %g1 ! len
105 add %g6, %g1, %g1 ! end
106 cmp %g6, %g4
107 bgu,pt %xcc, 2f
108 cmp %g4, %g1
109 bgeu,pt %xcc, 2f
110 ldx [%g5 + 0x10], %g1 ! PTE
111
112 /* TLB load, restore %g1, and return from trap. */
113 sub %g4, %g6, %g6
114 add %g1, %g6, %g5
115 mov TLB_SFSR, %g1
116 stxa %g5, [%g0] ASI_DTLB_DATA_IN
117 retry 67 retry
68 .section .sun4v_2insn_patch, "ax"
69 .word 661b
70 nop
71 nop
72 .previous
73
74 /* For sun4v the ASI_ITLB_DATA_IN store and the retry
75 * instruction get nop'd out and we get here to branch
76 * to the sun4v tlb load code. The registers are set up
77 * as follows:
78 *
79 * %g4: vaddr
80 * %g5: PTE
81 * %g6: TAG
82 *
83 * The sun4v TLB load wants the PTE in %g3 so we fix that
84 * up here.
85 */
86 ba,pt %xcc, sun4v_itlb_load
87 mov %g5, %g3
118 88
1192: ba,pt %xcc, 1b 89kvmap_itlb_longpath:
120 add %g5, (3 * 8), %g5 ! next entry 90
91661: rdpr %pstate, %g5
92 wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate
93 .section .sun4v_2insn_patch, "ax"
94 .word 661b
95 SET_GL(1)
96 nop
97 .previous
98
99 rdpr %tpc, %g5
100 ba,pt %xcc, sparc64_realfault_common
101 mov FAULT_CODE_ITLB, %g4
102
103kvmap_itlb_obp:
104 OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath)
105
106 KTSB_LOCK_TAG(%g1, %g2, %g7)
107
108 KTSB_WRITE(%g1, %g5, %g6)
109
110 ba,pt %xcc, kvmap_itlb_load
111 nop
112
113kvmap_dtlb_obp:
114 OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath)
115
116 KTSB_LOCK_TAG(%g1, %g2, %g7)
117
118 KTSB_WRITE(%g1, %g5, %g6)
119
120 ba,pt %xcc, kvmap_dtlb_load
121 nop
121 122
122/*
123 * On a first level data miss, check whether this is to the OBP range (note
124 * that such accesses can be made by prom, as well as by kernel using
125 * prom_getproperty on "address"), and if so, do not use vpte access ...
126 * rather, use information saved during inherit_prom_mappings() using 8k
127 * pagesize.
128 */
129 .align 32 123 .align 32
130kvmap: 124kvmap_dtlb_tsb4m_load:
131 brgez,pn %g4, kvmap_nonlinear 125 KTSB_LOCK_TAG(%g1, %g2, %g7)
126 KTSB_WRITE(%g1, %g5, %g6)
127 ba,pt %xcc, kvmap_dtlb_load
132 nop 128 nop
133 129
134#ifdef CONFIG_DEBUG_PAGEALLOC 130kvmap_dtlb:
131 /* %g6: TAG TARGET */
132 mov TLB_TAG_ACCESS, %g4
133 ldxa [%g4] ASI_DMMU, %g4
134
135 /* sun4v_dtlb_miss branches here with the missing virtual
136 * address already loaded into %g4
137 */
138kvmap_dtlb_4v:
139 brgez,pn %g4, kvmap_dtlb_nonlinear
140 nop
141
142 /* Correct TAG_TARGET is already in %g6, check 4mb TSB. */
143 KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
144
145 /* TSB entry address left in %g1, lookup linear PTE.
146 * Must preserve %g1 and %g6 (TAG).
147 */
148kvmap_dtlb_tsb4m_miss:
149 sethi %hi(kpte_linear_bitmap), %g2
150 or %g2, %lo(kpte_linear_bitmap), %g2
151
152 /* Clear the PAGE_OFFSET top virtual bits, then shift
153 * down to get a 256MB physical address index.
154 */
155 sllx %g4, 21, %g5
156 mov 1, %g7
157 srlx %g5, 21 + 28, %g5
158
159 /* Don't try this at home kids... this depends upon srlx
160 * only taking the low 6 bits of the shift count in %g5.
161 */
162 sllx %g7, %g5, %g7
163
164 /* Divide by 64 to get the offset into the bitmask. */
165 srlx %g5, 6, %g5
166 sllx %g5, 3, %g5
167
168 /* kern_linear_pte_xor[((mask & bit) ? 1 : 0)] */
169 ldx [%g2 + %g5], %g2
170 andcc %g2, %g7, %g0
171 sethi %hi(kern_linear_pte_xor), %g5
172 or %g5, %lo(kern_linear_pte_xor), %g5
173 bne,a,pt %xcc, 1f
174 add %g5, 8, %g5
175
1761: ldx [%g5], %g2
177
135 .globl kvmap_linear_patch 178 .globl kvmap_linear_patch
136kvmap_linear_patch: 179kvmap_linear_patch:
137#endif 180 ba,pt %xcc, kvmap_dtlb_tsb4m_load
138 ba,pt %xcc, kvmap_load
139 xor %g2, %g4, %g5 181 xor %g2, %g4, %g5
140 182
141#ifdef CONFIG_DEBUG_PAGEALLOC 183kvmap_dtlb_vmalloc_addr:
142 sethi %hi(swapper_pg_dir), %g5 184 KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
143 or %g5, %lo(swapper_pg_dir), %g5 185
144 sllx %g4, 64 - (PGDIR_SHIFT + PGDIR_BITS), %g6 186 KTSB_LOCK_TAG(%g1, %g2, %g7)
145 srlx %g6, 64 - PAGE_SHIFT, %g6 187
146 andn %g6, 0x3, %g6 188 /* Load and check PTE. */
147 lduw [%g5 + %g6], %g5 189 ldxa [%g5] ASI_PHYS_USE_EC, %g5
148 brz,pn %g5, longpath 190 mov 1, %g7
149 sllx %g4, 64 - (PMD_SHIFT + PMD_BITS), %g6 191 sllx %g7, TSB_TAG_INVALID_BIT, %g7
150 srlx %g6, 64 - PAGE_SHIFT, %g6 192 brgez,a,pn %g5, kvmap_dtlb_longpath
151 sllx %g5, 11, %g5 193 KTSB_STORE(%g1, %g7)
152 andn %g6, 0x3, %g6 194
153 lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5 195 KTSB_WRITE(%g1, %g5, %g6)
154 brz,pn %g5, longpath 196
155 sllx %g4, 64 - PMD_SHIFT, %g6 197 /* fallthrough to TLB load */
156 srlx %g6, 64 - PAGE_SHIFT, %g6 198
157 sllx %g5, 11, %g5 199kvmap_dtlb_load:
158 andn %g6, 0x7, %g6 200
159 ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5 201661: stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB
160 brz,pn %g5, longpath 202 retry
203 .section .sun4v_2insn_patch, "ax"
204 .word 661b
205 nop
206 nop
207 .previous
208
209 /* For sun4v the ASI_DTLB_DATA_IN store and the retry
210 * instruction get nop'd out and we get here to branch
211 * to the sun4v tlb load code. The registers are set up
212 * as follows:
213 *
214 * %g4: vaddr
215 * %g5: PTE
216 * %g6: TAG
217 *
218 * The sun4v TLB load wants the PTE in %g3 so we fix that
219 * up here.
220 */
221 ba,pt %xcc, sun4v_dtlb_load
222 mov %g5, %g3
223
224kvmap_dtlb_nonlinear:
225 /* Catch kernel NULL pointer derefs. */
226 sethi %hi(PAGE_SIZE), %g5
227 cmp %g4, %g5
228 bleu,pn %xcc, kvmap_dtlb_longpath
161 nop 229 nop
162 ba,a,pt %xcc, kvmap_load
163#endif
164 230
165kvmap_nonlinear: 231 KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
232
233kvmap_dtlb_tsbmiss:
166 sethi %hi(MODULES_VADDR), %g5 234 sethi %hi(MODULES_VADDR), %g5
167 cmp %g4, %g5 235 cmp %g4, %g5
168 blu,pn %xcc, longpath 236 blu,pn %xcc, kvmap_dtlb_longpath
169 mov (VMALLOC_END >> 24), %g5 237 mov (VMALLOC_END >> 24), %g5
170 sllx %g5, 24, %g5 238 sllx %g5, 24, %g5
171 cmp %g4, %g5 239 cmp %g4, %g5
172 bgeu,pn %xcc, longpath 240 bgeu,pn %xcc, kvmap_dtlb_longpath
173 nop 241 nop
174 242
175kvmap_check_obp: 243kvmap_check_obp:
176 sethi %hi(LOW_OBP_ADDRESS), %g5 244 sethi %hi(LOW_OBP_ADDRESS), %g5
177 cmp %g4, %g5 245 cmp %g4, %g5
178 blu,pn %xcc, kvmap_vmalloc_addr 246 blu,pn %xcc, kvmap_dtlb_vmalloc_addr
179 mov 0x1, %g5 247 mov 0x1, %g5
180 sllx %g5, 32, %g5 248 sllx %g5, 32, %g5
181 cmp %g4, %g5 249 cmp %g4, %g5
182 blu,pn %xcc, kvmap_do_obp 250 blu,pn %xcc, kvmap_dtlb_obp
183 nop 251 nop
184 252 ba,pt %xcc, kvmap_dtlb_vmalloc_addr
185kvmap_vmalloc_addr:
186 /* If we get here, a vmalloc addr was accessed, load kernel VPTE. */
187 ldxa [%g3 + %g6] ASI_N, %g5
188 brgez,pn %g5, longpath
189 nop 253 nop
190 254
191kvmap_load: 255kvmap_dtlb_longpath:
192 /* PTE is valid, load into TLB and return from trap. */ 256
193 stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB 257661: rdpr %pstate, %g5
194 retry 258 wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate
259 .section .sun4v_2insn_patch, "ax"
260 .word 661b
261 SET_GL(1)
262 ldxa [%g0] ASI_SCRATCHPAD, %g5
263 .previous
264
265 rdpr %tl, %g3
266 cmp %g3, 1
267
268661: mov TLB_TAG_ACCESS, %g4
269 ldxa [%g4] ASI_DMMU, %g5
270 .section .sun4v_2insn_patch, "ax"
271 .word 661b
272 ldx [%g5 + HV_FAULT_D_ADDR_OFFSET], %g5
273 nop
274 .previous
275
276 be,pt %xcc, sparc64_realfault_common
277 mov FAULT_CODE_DTLB, %g4
278 ba,pt %xcc, winfix_trampoline
279 nop
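The kvmap_dtlb_tsb4m_miss sequence above is dense; in C it is just a bitmap lookup keyed by the 256MB granule of the linear-mapping address, selecting one of two PTE templates. A standalone sketch (the array parameters stand in for the kpte_linear_bitmap and kern_linear_pte_xor symbols referenced above; sizes and masks are read off the shift counts):

#include <stdint.h>

/* Illustrative only: the bitmap/xor lookup done between
 * kvmap_dtlb_tsb4m_miss and kvmap_linear_patch above.
 */
static uint64_t linear_pte_for(uint64_t vaddr,
                               const uint64_t *kpte_linear_bitmap,
                               const uint64_t kern_linear_pte_xor[2])
{
        uint64_t idx  = (vaddr << 21) >> (21 + 28); /* strip PAGE_OFFSET bits, index 256MB chunks */
        uint64_t bit  = 1UL << (idx & 63);          /* hardware shifts use only the low 6 count bits */
        uint64_t word = idx >> 6;                   /* which 64-bit word of the bitmap */
        int which     = (kpte_linear_bitmap[word] & bit) != 0;

        return kern_linear_pte_xor[which] ^ vaddr;  /* the final "xor %g2, %g4, %g5" */
}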
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
index 2ff7c32ab0ce..95ffa9418620 100644
--- a/arch/sparc64/kernel/pci.c
+++ b/arch/sparc64/kernel/pci.c
@@ -188,6 +188,7 @@ extern void psycho_init(int, char *);
188extern void schizo_init(int, char *); 188extern void schizo_init(int, char *);
189extern void schizo_plus_init(int, char *); 189extern void schizo_plus_init(int, char *);
190extern void tomatillo_init(int, char *); 190extern void tomatillo_init(int, char *);
191extern void sun4v_pci_init(int, char *);
191 192
192static struct { 193static struct {
193 char *model_name; 194 char *model_name;
@@ -204,6 +205,7 @@ static struct {
204 { "pci108e,8002", schizo_plus_init }, 205 { "pci108e,8002", schizo_plus_init },
205 { "SUNW,tomatillo", tomatillo_init }, 206 { "SUNW,tomatillo", tomatillo_init },
206 { "pci108e,a801", tomatillo_init }, 207 { "pci108e,a801", tomatillo_init },
208 { "SUNW,sun4v-pci", sun4v_pci_init },
207}; 209};
208#define PCI_NUM_CONTROLLER_TYPES (sizeof(pci_controller_table) / \ 210#define PCI_NUM_CONTROLLER_TYPES (sizeof(pci_controller_table) / \
209 sizeof(pci_controller_table[0])) 211 sizeof(pci_controller_table[0]))
@@ -283,6 +285,12 @@ int __init pcic_present(void)
283 return pci_controller_scan(pci_is_controller); 285 return pci_controller_scan(pci_is_controller);
284} 286}
285 287
288struct pci_iommu_ops *pci_iommu_ops;
289EXPORT_SYMBOL(pci_iommu_ops);
290
291extern struct pci_iommu_ops pci_sun4u_iommu_ops,
292 pci_sun4v_iommu_ops;
293
286/* Find each controller in the system, attach and initialize 294/* Find each controller in the system, attach and initialize
287 * software state structure for each and link into the 295 * software state structure for each and link into the
288 * pci_controller_root. Setup the controller enough such 296 * pci_controller_root. Setup the controller enough such
@@ -290,6 +298,11 @@ int __init pcic_present(void)
290 */ 298 */
291static void __init pci_controller_probe(void) 299static void __init pci_controller_probe(void)
292{ 300{
301 if (tlb_type == hypervisor)
302 pci_iommu_ops = &pci_sun4v_iommu_ops;
303 else
304 pci_iommu_ops = &pci_sun4u_iommu_ops;
305
293 printk("PCI: Probing for controllers.\n"); 306 printk("PCI: Probing for controllers.\n");
294 307
295 pci_controller_scan(pci_controller_init); 308 pci_controller_scan(pci_controller_init);
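pci_iommu_ops turns the previously exported pci_map_*()/pci_unmap_*() entry points into a method table chosen once at probe time (sun4u vs. sun4v), so callers reach the right implementation through one indirection. A plausible wrapper shape, not shown in this hunk and only assumed here:

/* Sketch only, assuming the struct pci_iommu_ops methods registered in
 * pci_iommu.c and pci_sun4v.c below; the real wrapper definitions live
 * elsewhere.
 */
static inline dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr,
                                        size_t sz, int direction)
{
        return pci_iommu_ops->map_single(pdev, ptr, sz, direction);
}

static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr,
                                    size_t sz, int direction)
{
        pci_iommu_ops->unmap_single(pdev, bus_addr, sz, direction);
}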
diff --git a/arch/sparc64/kernel/pci_common.c b/arch/sparc64/kernel/pci_common.c
index 58310aacea28..33dedb1aacd4 100644
--- a/arch/sparc64/kernel/pci_common.c
+++ b/arch/sparc64/kernel/pci_common.c
@@ -39,6 +39,8 @@ static int __init find_device_prom_node(struct pci_pbm_info *pbm,
39{ 39{
40 int node; 40 int node;
41 41
42 *nregs = 0;
43
42 /* 44 /*
43 * Return the PBM's PROM node in case we are it's PCI device, 45 * Return the PBM's PROM node in case we are it's PCI device,
44 * as the PBM's reg property is different to standard PCI reg 46 * as the PBM's reg property is different to standard PCI reg
@@ -51,10 +53,8 @@ static int __init find_device_prom_node(struct pci_pbm_info *pbm,
51 pdev->device == PCI_DEVICE_ID_SUN_SCHIZO || 53 pdev->device == PCI_DEVICE_ID_SUN_SCHIZO ||
52 pdev->device == PCI_DEVICE_ID_SUN_TOMATILLO || 54 pdev->device == PCI_DEVICE_ID_SUN_TOMATILLO ||
53 pdev->device == PCI_DEVICE_ID_SUN_SABRE || 55 pdev->device == PCI_DEVICE_ID_SUN_SABRE ||
54 pdev->device == PCI_DEVICE_ID_SUN_HUMMINGBIRD)) { 56 pdev->device == PCI_DEVICE_ID_SUN_HUMMINGBIRD))
55 *nregs = 0;
56 return bus_prom_node; 57 return bus_prom_node;
57 }
58 58
59 node = prom_getchild(bus_prom_node); 59 node = prom_getchild(bus_prom_node);
60 while (node != 0) { 60 while (node != 0) {
@@ -541,135 +541,183 @@ void __init pci_assign_unassigned(struct pci_pbm_info *pbm,
541 pci_assign_unassigned(pbm, bus); 541 pci_assign_unassigned(pbm, bus);
542} 542}
543 543
544static int __init pci_intmap_match(struct pci_dev *pdev, unsigned int *interrupt) 544static inline unsigned int pci_slot_swivel(struct pci_pbm_info *pbm,
545 struct pci_dev *toplevel_pdev,
546 struct pci_dev *pdev,
547 unsigned int interrupt)
545{ 548{
546 struct linux_prom_pci_intmap bridge_local_intmap[PROM_PCIIMAP_MAX], *intmap; 549 unsigned int ret;
547 struct linux_prom_pci_intmask bridge_local_intmask, *intmask;
548 struct pcidev_cookie *dev_pcp = pdev->sysdata;
549 struct pci_pbm_info *pbm = dev_pcp->pbm;
550 struct linux_prom_pci_registers *pregs = dev_pcp->prom_regs;
551 unsigned int hi, mid, lo, irq;
552 int i, num_intmap, map_slot;
553 550
554 intmap = &pbm->pbm_intmap[0]; 551 if (unlikely(interrupt < 1 || interrupt > 4)) {
555 intmask = &pbm->pbm_intmask; 552 printk("%s: Device %s interrupt value of %u is strange.\n",
556 num_intmap = pbm->num_pbm_intmap; 553 pbm->name, pci_name(pdev), interrupt);
557 map_slot = 0; 554 return interrupt;
555 }
558 556
559 /* If we are underneath a PCI bridge, use PROM register 557 ret = ((interrupt - 1 + (PCI_SLOT(pdev->devfn) & 3)) & 3) + 1;
560 * property of the parent bridge which is closest to 558
561 * the PBM. 559 printk("%s: %s IRQ Swivel %s [%x:%x] -> [%x]\n",
562 * 560 pbm->name, pci_name(toplevel_pdev), pci_name(pdev),
563 * However if that parent bridge has interrupt map/mask 561 interrupt, PCI_SLOT(pdev->devfn), ret);
564 * properties of its own we use the PROM register property 562
565 * of the next child device on the path to PDEV. 563 return ret;
566 * 564}
567 * In detail the two cases are (note that the 'X' below is the 565
568 * 'next child on the path to PDEV' mentioned above): 566static inline unsigned int pci_apply_intmap(struct pci_pbm_info *pbm,
569 * 567 struct pci_dev *toplevel_pdev,
570 * 1) PBM --> PCI bus lacking int{map,mask} --> X ... PDEV 568 struct pci_dev *pbus,
571 * 569 struct pci_dev *pdev,
572 * Here we use regs of 'PCI bus' device. 570 unsigned int interrupt,
573 * 571 unsigned int *cnode)
574 * 2) PBM --> PCI bus with int{map,mask} --> X ... PDEV 572{
575 * 573 struct linux_prom_pci_intmap imap[PROM_PCIIMAP_MAX];
576 * Here we use regs of 'X'. Note that X can be PDEV. 574 struct linux_prom_pci_intmask imask;
577 */ 575 struct pcidev_cookie *pbus_pcp = pbus->sysdata;
578 if (pdev->bus->number != pbm->pci_first_busno) { 576 struct pcidev_cookie *pdev_pcp = pdev->sysdata;
579 struct pcidev_cookie *bus_pcp, *regs_pcp; 577 struct linux_prom_pci_registers *pregs = pdev_pcp->prom_regs;
580 struct pci_dev *bus_dev, *regs_dev; 578 int plen, num_imap, i;
581 int plen; 579 unsigned int hi, mid, lo, irq, orig_interrupt;
580
581 *cnode = pbus_pcp->prom_node;
582
583 plen = prom_getproperty(pbus_pcp->prom_node, "interrupt-map",
584 (char *) &imap[0], sizeof(imap));
585 if (plen <= 0 ||
586 (plen % sizeof(struct linux_prom_pci_intmap)) != 0) {
587 printk("%s: Device %s interrupt-map has bad len %d\n",
588 pbm->name, pci_name(pbus), plen);
589 goto no_intmap;
590 }
591 num_imap = plen / sizeof(struct linux_prom_pci_intmap);
592
593 plen = prom_getproperty(pbus_pcp->prom_node, "interrupt-map-mask",
594 (char *) &imask, sizeof(imask));
595 if (plen <= 0 ||
596 (plen % sizeof(struct linux_prom_pci_intmask)) != 0) {
597 printk("%s: Device %s interrupt-map-mask has bad len %d\n",
598 pbm->name, pci_name(pbus), plen);
599 goto no_intmap;
600 }
601
602 orig_interrupt = interrupt;
582 603
583 bus_dev = pdev->bus->self; 604 hi = pregs->phys_hi & imask.phys_hi;
584 regs_dev = pdev; 605 mid = pregs->phys_mid & imask.phys_mid;
606 lo = pregs->phys_lo & imask.phys_lo;
607 irq = interrupt & imask.interrupt;
585 608
586 while (bus_dev->bus && 609 for (i = 0; i < num_imap; i++) {
587 bus_dev->bus->number != pbm->pci_first_busno) { 610 if (imap[i].phys_hi == hi &&
588 regs_dev = bus_dev; 611 imap[i].phys_mid == mid &&
589 bus_dev = bus_dev->bus->self; 612 imap[i].phys_lo == lo &&
613 imap[i].interrupt == irq) {
614 *cnode = imap[i].cnode;
615 interrupt = imap[i].cinterrupt;
590 } 616 }
617 }
591 618
592 regs_pcp = regs_dev->sysdata; 619 printk("%s: %s MAP BUS %s DEV %s [%x] -> [%x]\n",
593 pregs = regs_pcp->prom_regs; 620 pbm->name, pci_name(toplevel_pdev),
621 pci_name(pbus), pci_name(pdev),
622 orig_interrupt, interrupt);
594 623
595 bus_pcp = bus_dev->sysdata; 624no_intmap:
625 return interrupt;
626}
596 627
597 /* But if the PCI bridge has its own interrupt map 628/* For each PCI bus on the way to the root:
598 * and mask properties, use that and the regs of the 629 * 1) If it has an interrupt-map property, apply it.
599 * PCI entity at the next level down on the path to the 630 * 2) Else, swivel the interrupt number based upon the PCI device number.
600 * device. 631 *
601 */ 632 * Return the "IRQ controller" node. If this is the PBM's device node,
602 plen = prom_getproperty(bus_pcp->prom_node, "interrupt-map", 633 * all interrupt translations are complete, else we should use that node's
603 (char *) &bridge_local_intmap[0], 634 * "reg" property to apply the PBM's "interrupt-{map,mask}" to the interrupt.
604 sizeof(bridge_local_intmap)); 635 */
605 if (plen != -1) { 636static unsigned int __init pci_intmap_match_to_root(struct pci_pbm_info *pbm,
606 intmap = &bridge_local_intmap[0]; 637 struct pci_dev *pdev,
607 num_intmap = plen / sizeof(struct linux_prom_pci_intmap); 638 unsigned int *interrupt)
608 plen = prom_getproperty(bus_pcp->prom_node, 639{
609 "interrupt-map-mask", 640 struct pci_dev *toplevel_pdev = pdev;
610 (char *) &bridge_local_intmask, 641 struct pcidev_cookie *toplevel_pcp = toplevel_pdev->sysdata;
611 sizeof(bridge_local_intmask)); 642 unsigned int cnode = toplevel_pcp->prom_node;
612 if (plen == -1) { 643
613 printk("pci_intmap_match: Warning! Bridge has intmap " 644 while (pdev->bus->number != pbm->pci_first_busno) {
614 "but no intmask.\n"); 645 struct pci_dev *pbus = pdev->bus->self;
615 printk("pci_intmap_match: Trying to recover.\n"); 646 struct pcidev_cookie *pcp = pbus->sysdata;
616 return 0; 647 int plen;
617 }
618 648
619 if (pdev->bus->self != bus_dev) 649 plen = prom_getproplen(pcp->prom_node, "interrupt-map");
620 map_slot = 1; 650 if (plen <= 0) {
651 *interrupt = pci_slot_swivel(pbm, toplevel_pdev,
652 pdev, *interrupt);
653 cnode = pcp->prom_node;
621 } else { 654 } else {
622 pregs = bus_pcp->prom_regs; 655 *interrupt = pci_apply_intmap(pbm, toplevel_pdev,
623 map_slot = 1; 656 pbus, pdev,
657 *interrupt, &cnode);
658
659 while (pcp->prom_node != cnode &&
660 pbus->bus->number != pbm->pci_first_busno) {
661 pbus = pbus->bus->self;
662 pcp = pbus->sysdata;
663 }
624 } 664 }
625 } 665 pdev = pbus;
626 666
627 if (map_slot) { 667 if (cnode == pbm->prom_node)
628 *interrupt = ((*interrupt 668 break;
629 - 1
630 + PCI_SLOT(pdev->devfn)) & 0x3) + 1;
631 } 669 }
632 670
633 hi = pregs->phys_hi & intmask->phys_hi; 671 return cnode;
634 mid = pregs->phys_mid & intmask->phys_mid; 672}
635 lo = pregs->phys_lo & intmask->phys_lo; 673
636 irq = *interrupt & intmask->interrupt; 674static int __init pci_intmap_match(struct pci_dev *pdev, unsigned int *interrupt)
637 675{
638 for (i = 0; i < num_intmap; i++) { 676 struct pcidev_cookie *dev_pcp = pdev->sysdata;
639 if (intmap[i].phys_hi == hi && 677 struct pci_pbm_info *pbm = dev_pcp->pbm;
640 intmap[i].phys_mid == mid && 678 struct linux_prom_pci_registers reg[PROMREG_MAX];
641 intmap[i].phys_lo == lo && 679 unsigned int hi, mid, lo, irq;
642 intmap[i].interrupt == irq) { 680 int i, cnode, plen;
643 *interrupt = intmap[i].cinterrupt; 681
644 printk("PCI-IRQ: Routing bus[%2x] slot[%2x] map[%d] to INO[%02x]\n", 682 cnode = pci_intmap_match_to_root(pbm, pdev, interrupt);
645 pdev->bus->number, PCI_SLOT(pdev->devfn), 683 if (cnode == pbm->prom_node)
646 map_slot, *interrupt); 684 goto success;
647 return 1; 685
648 } 686 plen = prom_getproperty(cnode, "reg", (char *) reg, sizeof(reg));
687 if (plen <= 0 ||
688 (plen % sizeof(struct linux_prom_pci_registers)) != 0) {
689 printk("%s: OBP node %x reg property has bad len %d\n",
690 pbm->name, cnode, plen);
691 goto fail;
649 } 692 }
650 693
651 /* We will run this code even if pbm->num_pbm_intmap is zero, just so 694 hi = reg[0].phys_hi & pbm->pbm_intmask.phys_hi;
652 * we can apply the slot mapping to the PROM interrupt property value. 695 mid = reg[0].phys_mid & pbm->pbm_intmask.phys_mid;
653 * So do not spit out these warnings in that case. 696 lo = reg[0].phys_lo & pbm->pbm_intmask.phys_lo;
654 */ 697 irq = *interrupt & pbm->pbm_intmask.interrupt;
655 if (num_intmap != 0) { 698
656 /* Print it both to OBP console and kernel one so that if bootup 699 for (i = 0; i < pbm->num_pbm_intmap; i++) {
657 * hangs here the user has the information to report. 700 struct linux_prom_pci_intmap *intmap;
658 */ 701
659 prom_printf("pci_intmap_match: bus %02x, devfn %02x: ", 702 intmap = &pbm->pbm_intmap[i];
660 pdev->bus->number, pdev->devfn); 703
661 prom_printf("IRQ [%08x.%08x.%08x.%08x] not found in interrupt-map\n", 704 if (intmap->phys_hi == hi &&
662 pregs->phys_hi, pregs->phys_mid, pregs->phys_lo, *interrupt); 705 intmap->phys_mid == mid &&
663 prom_printf("Please email this information to davem@redhat.com\n"); 706 intmap->phys_lo == lo &&
664 707 intmap->interrupt == irq) {
665 printk("pci_intmap_match: bus %02x, devfn %02x: ", 708 *interrupt = intmap->cinterrupt;
666 pdev->bus->number, pdev->devfn); 709 goto success;
667 printk("IRQ [%08x.%08x.%08x.%08x] not found in interrupt-map\n", 710 }
668 pregs->phys_hi, pregs->phys_mid, pregs->phys_lo, *interrupt);
669 printk("Please email this information to davem@redhat.com\n");
670 } 711 }
671 712
713fail:
672 return 0; 714 return 0;
715
716success:
717 printk("PCI-IRQ: Routing bus[%2x] slot[%2x] to INO[%02x]\n",
718 pdev->bus->number, PCI_SLOT(pdev->devfn),
719 *interrupt);
720 return 1;
673} 721}
674 722
675static void __init pdev_fixup_irq(struct pci_dev *pdev) 723static void __init pdev_fixup_irq(struct pci_dev *pdev)
@@ -703,16 +751,18 @@ static void __init pdev_fixup_irq(struct pci_dev *pdev)
703 return; 751 return;
704 } 752 }
705 753
706 /* Fully specified already? */ 754 if (tlb_type != hypervisor) {
707 if (((prom_irq & PCI_IRQ_IGN) >> 6) == portid) { 755 /* Fully specified already? */
708 pdev->irq = p->irq_build(pbm, pdev, prom_irq); 756 if (((prom_irq & PCI_IRQ_IGN) >> 6) == portid) {
709 goto have_irq; 757 pdev->irq = p->irq_build(pbm, pdev, prom_irq);
710 } 758 goto have_irq;
759 }
711 760
712 /* An onboard device? (bit 5 set) */ 761 /* An onboard device? (bit 5 set) */
713 if ((prom_irq & PCI_IRQ_INO) & 0x20) { 762 if ((prom_irq & PCI_IRQ_INO) & 0x20) {
714 pdev->irq = p->irq_build(pbm, pdev, (portid << 6 | prom_irq)); 763 pdev->irq = p->irq_build(pbm, pdev, (portid << 6 | prom_irq));
715 goto have_irq; 764 goto have_irq;
765 }
716 } 766 }
717 767
718 /* Can we find a matching entry in the interrupt-map? */ 768 /* Can we find a matching entry in the interrupt-map? */
@@ -927,33 +977,30 @@ void pci_register_legacy_regions(struct resource *io_res,
927 struct resource *p; 977 struct resource *p;
928 978
929 /* VGA Video RAM. */ 979 /* VGA Video RAM. */
930 p = kmalloc(sizeof(*p), GFP_KERNEL); 980 p = kzalloc(sizeof(*p), GFP_KERNEL);
931 if (!p) 981 if (!p)
932 return; 982 return;
933 983
934 memset(p, 0, sizeof(*p));
935 p->name = "Video RAM area"; 984 p->name = "Video RAM area";
936 p->start = mem_res->start + 0xa0000UL; 985 p->start = mem_res->start + 0xa0000UL;
937 p->end = p->start + 0x1ffffUL; 986 p->end = p->start + 0x1ffffUL;
938 p->flags = IORESOURCE_BUSY; 987 p->flags = IORESOURCE_BUSY;
939 request_resource(mem_res, p); 988 request_resource(mem_res, p);
940 989
941 p = kmalloc(sizeof(*p), GFP_KERNEL); 990 p = kzalloc(sizeof(*p), GFP_KERNEL);
942 if (!p) 991 if (!p)
943 return; 992 return;
944 993
945 memset(p, 0, sizeof(*p));
946 p->name = "System ROM"; 994 p->name = "System ROM";
947 p->start = mem_res->start + 0xf0000UL; 995 p->start = mem_res->start + 0xf0000UL;
948 p->end = p->start + 0xffffUL; 996 p->end = p->start + 0xffffUL;
949 p->flags = IORESOURCE_BUSY; 997 p->flags = IORESOURCE_BUSY;
950 request_resource(mem_res, p); 998 request_resource(mem_res, p);
951 999
952 p = kmalloc(sizeof(*p), GFP_KERNEL); 1000 p = kzalloc(sizeof(*p), GFP_KERNEL);
953 if (!p) 1001 if (!p)
954 return; 1002 return;
955 1003
956 memset(p, 0, sizeof(*p));
957 p->name = "Video ROM"; 1004 p->name = "Video ROM";
958 p->start = mem_res->start + 0xc0000UL; 1005 p->start = mem_res->start + 0xc0000UL;
959 p->end = p->start + 0x7fffUL; 1006 p->end = p->start + 0x7fffUL;
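The rewritten IRQ routing walks from the device toward the PBM: at each intervening bridge it either applies that bridge's interrupt-map property or, when the bridge has none, rotates the INTx line by the child's slot number, stopping once the PBM's own node is reached. The slot swivel is plain modular arithmetic; a standalone sketch of the computation pci_slot_swivel() performs (the helper name is illustrative):

/* Illustrative only: INTx rotation used at bridges without an
 * interrupt-map.  interrupt is 1..4 (INTA..INTD), slot is the child
 * device's PCI device number.
 */
static unsigned int intx_swivel(unsigned int interrupt, unsigned int slot)
{
        if (interrupt < 1 || interrupt > 4)
                return interrupt;   /* strange values are passed through unchanged */

        return ((interrupt - 1 + (slot & 3)) & 3) + 1;
}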
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c
index a11910be1013..8efbc139769d 100644
--- a/arch/sparc64/kernel/pci_iommu.c
+++ b/arch/sparc64/kernel/pci_iommu.c
@@ -139,12 +139,11 @@ void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset,
139 /* Allocate and initialize the free area map. */ 139 /* Allocate and initialize the free area map. */
140 sz = num_tsb_entries / 8; 140 sz = num_tsb_entries / 8;
141 sz = (sz + 7UL) & ~7UL; 141 sz = (sz + 7UL) & ~7UL;
142 iommu->arena.map = kmalloc(sz, GFP_KERNEL); 142 iommu->arena.map = kzalloc(sz, GFP_KERNEL);
143 if (!iommu->arena.map) { 143 if (!iommu->arena.map) {
144 prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n"); 144 prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
145 prom_halt(); 145 prom_halt();
146 } 146 }
147 memset(iommu->arena.map, 0, sz);
148 iommu->arena.limit = num_tsb_entries; 147 iommu->arena.limit = num_tsb_entries;
149 148
150 /* Allocate and initialize the dummy page which we 149 /* Allocate and initialize the dummy page which we
@@ -219,7 +218,7 @@ static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx)
219 * DMA for PCI device PDEV. Return non-NULL cpu-side address if 218 * DMA for PCI device PDEV. Return non-NULL cpu-side address if
220 * successful and set *DMA_ADDRP to the PCI side dma address. 219 * successful and set *DMA_ADDRP to the PCI side dma address.
221 */ 220 */
222void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp) 221static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
223{ 222{
224 struct pcidev_cookie *pcp; 223 struct pcidev_cookie *pcp;
225 struct pci_iommu *iommu; 224 struct pci_iommu *iommu;
@@ -267,7 +266,7 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad
267} 266}
268 267
269/* Free and unmap a consistent DMA translation. */ 268/* Free and unmap a consistent DMA translation. */
270void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma) 269static void pci_4u_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
271{ 270{
272 struct pcidev_cookie *pcp; 271 struct pcidev_cookie *pcp;
273 struct pci_iommu *iommu; 272 struct pci_iommu *iommu;
@@ -294,7 +293,7 @@ void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_
294/* Map a single buffer at PTR of SZ bytes for PCI DMA 293/* Map a single buffer at PTR of SZ bytes for PCI DMA
295 * in streaming mode. 294 * in streaming mode.
296 */ 295 */
297dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction) 296static dma_addr_t pci_4u_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
298{ 297{
299 struct pcidev_cookie *pcp; 298 struct pcidev_cookie *pcp;
300 struct pci_iommu *iommu; 299 struct pci_iommu *iommu;
@@ -415,7 +414,7 @@ do_flush_sync:
415} 414}
416 415
417/* Unmap a single streaming mode DMA translation. */ 416/* Unmap a single streaming mode DMA translation. */
418void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) 417static void pci_4u_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
419{ 418{
420 struct pcidev_cookie *pcp; 419 struct pcidev_cookie *pcp;
421 struct pci_iommu *iommu; 420 struct pci_iommu *iommu;
@@ -548,7 +547,7 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
548 * When making changes here, inspect the assembly output. I was having 547 * When making changes here, inspect the assembly output. I was having
549 * hard time to keep this routine out of using stack slots for holding variables. 548 * hard time to keep this routine out of using stack slots for holding variables.
550 */ 549 */
551int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) 550static int pci_4u_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
552{ 551{
553 struct pcidev_cookie *pcp; 552 struct pcidev_cookie *pcp;
554 struct pci_iommu *iommu; 553 struct pci_iommu *iommu;
@@ -562,9 +561,9 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int
562 /* Fast path single entry scatterlists. */ 561 /* Fast path single entry scatterlists. */
563 if (nelems == 1) { 562 if (nelems == 1) {
564 sglist->dma_address = 563 sglist->dma_address =
565 pci_map_single(pdev, 564 pci_4u_map_single(pdev,
566 (page_address(sglist->page) + sglist->offset), 565 (page_address(sglist->page) + sglist->offset),
567 sglist->length, direction); 566 sglist->length, direction);
568 if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE)) 567 if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
569 return 0; 568 return 0;
570 sglist->dma_length = sglist->length; 569 sglist->dma_length = sglist->length;
@@ -635,7 +634,7 @@ bad_no_ctx:
635} 634}
636 635
637/* Unmap a set of streaming mode DMA translations. */ 636/* Unmap a set of streaming mode DMA translations. */
638void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) 637static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
639{ 638{
640 struct pcidev_cookie *pcp; 639 struct pcidev_cookie *pcp;
641 struct pci_iommu *iommu; 640 struct pci_iommu *iommu;
@@ -695,7 +694,7 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
695/* Make physical memory consistent for a single 694/* Make physical memory consistent for a single
696 * streaming mode DMA translation after a transfer. 695 * streaming mode DMA translation after a transfer.
697 */ 696 */
698void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) 697static void pci_4u_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
699{ 698{
700 struct pcidev_cookie *pcp; 699 struct pcidev_cookie *pcp;
701 struct pci_iommu *iommu; 700 struct pci_iommu *iommu;
@@ -735,7 +734,7 @@ void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size
735/* Make physical memory consistent for a set of streaming 734/* Make physical memory consistent for a set of streaming
736 * mode DMA translations after a transfer. 735 * mode DMA translations after a transfer.
737 */ 736 */
738void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) 737static void pci_4u_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
739{ 738{
740 struct pcidev_cookie *pcp; 739 struct pcidev_cookie *pcp;
741 struct pci_iommu *iommu; 740 struct pci_iommu *iommu;
@@ -776,6 +775,17 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, i
776 spin_unlock_irqrestore(&iommu->lock, flags); 775 spin_unlock_irqrestore(&iommu->lock, flags);
777} 776}
778 777
778struct pci_iommu_ops pci_sun4u_iommu_ops = {
779 .alloc_consistent = pci_4u_alloc_consistent,
780 .free_consistent = pci_4u_free_consistent,
781 .map_single = pci_4u_map_single,
782 .unmap_single = pci_4u_unmap_single,
783 .map_sg = pci_4u_map_sg,
784 .unmap_sg = pci_4u_unmap_sg,
785 .dma_sync_single_for_cpu = pci_4u_dma_sync_single_for_cpu,
786 .dma_sync_sg_for_cpu = pci_4u_dma_sync_sg_for_cpu,
787};
788
779static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit) 789static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
780{ 790{
781 struct pci_dev *ali_isa_bridge; 791 struct pci_dev *ali_isa_bridge;
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
index c03ed5f49d31..d17878b145c2 100644
--- a/arch/sparc64/kernel/pci_psycho.c
+++ b/arch/sparc64/kernel/pci_psycho.c
@@ -286,17 +286,17 @@ static unsigned char psycho_pil_table[] = {
286/*0x14*/0, 0, 0, 0, /* PCI B slot 1 Int A, B, C, D */ 286/*0x14*/0, 0, 0, 0, /* PCI B slot 1 Int A, B, C, D */
287/*0x18*/0, 0, 0, 0, /* PCI B slot 2 Int A, B, C, D */ 287/*0x18*/0, 0, 0, 0, /* PCI B slot 2 Int A, B, C, D */
288/*0x1c*/0, 0, 0, 0, /* PCI B slot 3 Int A, B, C, D */ 288/*0x1c*/0, 0, 0, 0, /* PCI B slot 3 Int A, B, C, D */
289/*0x20*/4, /* SCSI */ 289/*0x20*/5, /* SCSI */
290/*0x21*/5, /* Ethernet */ 290/*0x21*/5, /* Ethernet */
291/*0x22*/8, /* Parallel Port */ 291/*0x22*/8, /* Parallel Port */
292/*0x23*/13, /* Audio Record */ 292/*0x23*/13, /* Audio Record */
293/*0x24*/14, /* Audio Playback */ 293/*0x24*/14, /* Audio Playback */
294/*0x25*/15, /* PowerFail */ 294/*0x25*/15, /* PowerFail */
295/*0x26*/4, /* second SCSI */ 295/*0x26*/5, /* second SCSI */
296/*0x27*/11, /* Floppy */ 296/*0x27*/11, /* Floppy */
297/*0x28*/4, /* Spare Hardware */ 297/*0x28*/5, /* Spare Hardware */
298/*0x29*/9, /* Keyboard */ 298/*0x29*/9, /* Keyboard */
299/*0x2a*/4, /* Mouse */ 299/*0x2a*/5, /* Mouse */
300/*0x2b*/12, /* Serial */ 300/*0x2b*/12, /* Serial */
301/*0x2c*/10, /* Timer 0 */ 301/*0x2c*/10, /* Timer 0 */
302/*0x2d*/11, /* Timer 1 */ 302/*0x2d*/11, /* Timer 1 */
@@ -313,11 +313,11 @@ static int psycho_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
313 313
314 ret = psycho_pil_table[ino]; 314 ret = psycho_pil_table[ino];
315 if (ret == 0 && pdev == NULL) { 315 if (ret == 0 && pdev == NULL) {
316 ret = 4; 316 ret = 5;
317 } else if (ret == 0) { 317 } else if (ret == 0) {
318 switch ((pdev->class >> 16) & 0xff) { 318 switch ((pdev->class >> 16) & 0xff) {
319 case PCI_BASE_CLASS_STORAGE: 319 case PCI_BASE_CLASS_STORAGE:
320 ret = 4; 320 ret = 5;
321 break; 321 break;
322 322
323 case PCI_BASE_CLASS_NETWORK: 323 case PCI_BASE_CLASS_NETWORK:
@@ -336,7 +336,7 @@ static int psycho_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
336 break; 336 break;
337 337
338 default: 338 default:
339 ret = 4; 339 ret = 5;
340 break; 340 break;
341 }; 341 };
342 } 342 }
@@ -1164,7 +1164,7 @@ static void pbm_config_busmastering(struct pci_pbm_info *pbm)
1164static void pbm_scan_bus(struct pci_controller_info *p, 1164static void pbm_scan_bus(struct pci_controller_info *p,
1165 struct pci_pbm_info *pbm) 1165 struct pci_pbm_info *pbm)
1166{ 1166{
1167 struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL); 1167 struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
1168 1168
1169 if (!cookie) { 1169 if (!cookie) {
1170 prom_printf("PSYCHO: Critical allocation failure.\n"); 1170 prom_printf("PSYCHO: Critical allocation failure.\n");
@@ -1172,7 +1172,6 @@ static void pbm_scan_bus(struct pci_controller_info *p,
1172 } 1172 }
1173 1173
1174 /* All we care about is the PBM. */ 1174 /* All we care about is the PBM. */
1175 memset(cookie, 0, sizeof(*cookie));
1176 cookie->pbm = pbm; 1175 cookie->pbm = pbm;
1177 1176
1178 pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, 1177 pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno,
@@ -1465,18 +1464,16 @@ void psycho_init(int node, char *model_name)
1465 } 1464 }
1466 } 1465 }
1467 1466
1468 p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC); 1467 p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
1469 if (!p) { 1468 if (!p) {
1470 prom_printf("PSYCHO: Fatal memory allocation error.\n"); 1469 prom_printf("PSYCHO: Fatal memory allocation error.\n");
1471 prom_halt(); 1470 prom_halt();
1472 } 1471 }
1473 memset(p, 0, sizeof(*p)); 1472 iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
1474 iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
1475 if (!iommu) { 1473 if (!iommu) {
1476 prom_printf("PSYCHO: Fatal memory allocation error.\n"); 1474 prom_printf("PSYCHO: Fatal memory allocation error.\n");
1477 prom_halt(); 1475 prom_halt();
1478 } 1476 }
1479 memset(iommu, 0, sizeof(*iommu));
1480 p->pbm_A.iommu = p->pbm_B.iommu = iommu; 1477 p->pbm_A.iommu = p->pbm_B.iommu = iommu;
1481 1478
1482 p->next = pci_controller_root; 1479 p->next = pci_controller_root;
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
index da8e1364194f..f67bb7f078cf 100644
--- a/arch/sparc64/kernel/pci_sabre.c
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -533,17 +533,17 @@ static unsigned char sabre_pil_table[] = {
533/*0x14*/0, 0, 0, 0, /* PCI B slot 1 Int A, B, C, D */ 533/*0x14*/0, 0, 0, 0, /* PCI B slot 1 Int A, B, C, D */
534/*0x18*/0, 0, 0, 0, /* PCI B slot 2 Int A, B, C, D */ 534/*0x18*/0, 0, 0, 0, /* PCI B slot 2 Int A, B, C, D */
535/*0x1c*/0, 0, 0, 0, /* PCI B slot 3 Int A, B, C, D */ 535/*0x1c*/0, 0, 0, 0, /* PCI B slot 3 Int A, B, C, D */
536/*0x20*/4, /* SCSI */ 536/*0x20*/5, /* SCSI */
537/*0x21*/5, /* Ethernet */ 537/*0x21*/5, /* Ethernet */
538/*0x22*/8, /* Parallel Port */ 538/*0x22*/8, /* Parallel Port */
539/*0x23*/13, /* Audio Record */ 539/*0x23*/13, /* Audio Record */
540/*0x24*/14, /* Audio Playback */ 540/*0x24*/14, /* Audio Playback */
541/*0x25*/15, /* PowerFail */ 541/*0x25*/15, /* PowerFail */
542/*0x26*/4, /* second SCSI */ 542/*0x26*/5, /* second SCSI */
543/*0x27*/11, /* Floppy */ 543/*0x27*/11, /* Floppy */
544/*0x28*/4, /* Spare Hardware */ 544/*0x28*/5, /* Spare Hardware */
545/*0x29*/9, /* Keyboard */ 545/*0x29*/9, /* Keyboard */
546/*0x2a*/4, /* Mouse */ 546/*0x2a*/5, /* Mouse */
547/*0x2b*/12, /* Serial */ 547/*0x2b*/12, /* Serial */
548/*0x2c*/10, /* Timer 0 */ 548/*0x2c*/10, /* Timer 0 */
549/*0x2d*/11, /* Timer 1 */ 549/*0x2d*/11, /* Timer 1 */
@@ -565,11 +565,11 @@ static int sabre_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
565 565
566 ret = sabre_pil_table[ino]; 566 ret = sabre_pil_table[ino];
567 if (ret == 0 && pdev == NULL) { 567 if (ret == 0 && pdev == NULL) {
568 ret = 4; 568 ret = 5;
569 } else if (ret == 0) { 569 } else if (ret == 0) {
570 switch ((pdev->class >> 16) & 0xff) { 570 switch ((pdev->class >> 16) & 0xff) {
571 case PCI_BASE_CLASS_STORAGE: 571 case PCI_BASE_CLASS_STORAGE:
572 ret = 4; 572 ret = 5;
573 break; 573 break;
574 574
575 case PCI_BASE_CLASS_NETWORK: 575 case PCI_BASE_CLASS_NETWORK:
@@ -588,7 +588,7 @@ static int sabre_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
588 break; 588 break;
589 589
590 default: 590 default:
591 ret = 4; 591 ret = 5;
592 break; 592 break;
593 }; 593 };
594 } 594 }
@@ -1167,7 +1167,7 @@ static void apb_init(struct pci_controller_info *p, struct pci_bus *sabre_bus)
1167 1167
1168static struct pcidev_cookie *alloc_bridge_cookie(struct pci_pbm_info *pbm) 1168static struct pcidev_cookie *alloc_bridge_cookie(struct pci_pbm_info *pbm)
1169{ 1169{
1170 struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL); 1170 struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
1171 1171
1172 if (!cookie) { 1172 if (!cookie) {
1173 prom_printf("SABRE: Critical allocation failure.\n"); 1173 prom_printf("SABRE: Critical allocation failure.\n");
@@ -1175,7 +1175,6 @@ static struct pcidev_cookie *alloc_bridge_cookie(struct pci_pbm_info *pbm)
1175 } 1175 }
1176 1176
1177 /* All we care about is the PBM. */ 1177 /* All we care about is the PBM. */
1178 memset(cookie, 0, sizeof(*cookie));
1179 cookie->pbm = pbm; 1178 cookie->pbm = pbm;
1180 1179
1181 return cookie; 1180 return cookie;
@@ -1556,19 +1555,17 @@ void sabre_init(int pnode, char *model_name)
1556 } 1555 }
1557 } 1556 }
1558 1557
1559 p = kmalloc(sizeof(*p), GFP_ATOMIC); 1558 p = kzalloc(sizeof(*p), GFP_ATOMIC);
1560 if (!p) { 1559 if (!p) {
1561 prom_printf("SABRE: Error, kmalloc(pci_controller_info) failed.\n"); 1560 prom_printf("SABRE: Error, kmalloc(pci_controller_info) failed.\n");
1562 prom_halt(); 1561 prom_halt();
1563 } 1562 }
1564 memset(p, 0, sizeof(*p));
1565 1563
1566 iommu = kmalloc(sizeof(*iommu), GFP_ATOMIC); 1564 iommu = kzalloc(sizeof(*iommu), GFP_ATOMIC);
1567 if (!iommu) { 1565 if (!iommu) {
1568 prom_printf("SABRE: Error, kmalloc(pci_iommu) failed.\n"); 1566 prom_printf("SABRE: Error, kmalloc(pci_iommu) failed.\n");
1569 prom_halt(); 1567 prom_halt();
1570 } 1568 }
1571 memset(iommu, 0, sizeof(*iommu));
1572 p->pbm_A.iommu = p->pbm_B.iommu = iommu; 1569 p->pbm_A.iommu = p->pbm_B.iommu = iommu;
1573 1570
1574 upa_portid = prom_getintdefault(pnode, "upa-portid", 0xff); 1571 upa_portid = prom_getintdefault(pnode, "upa-portid", 0xff);
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c
index d8c4e0919b4e..7fe4de03ac2e 100644
--- a/arch/sparc64/kernel/pci_schizo.c
+++ b/arch/sparc64/kernel/pci_schizo.c
@@ -243,8 +243,8 @@ static unsigned char schizo_pil_table[] = {
243/*0x0c*/0, 0, 0, 0, /* PCI slot 3 Int A, B, C, D */ 243/*0x0c*/0, 0, 0, 0, /* PCI slot 3 Int A, B, C, D */
244/*0x10*/0, 0, 0, 0, /* PCI slot 4 Int A, B, C, D */ 244/*0x10*/0, 0, 0, 0, /* PCI slot 4 Int A, B, C, D */
245/*0x14*/0, 0, 0, 0, /* PCI slot 5 Int A, B, C, D */ 245/*0x14*/0, 0, 0, 0, /* PCI slot 5 Int A, B, C, D */
246/*0x18*/4, /* SCSI */ 246/*0x18*/5, /* SCSI */
247/*0x19*/4, /* second SCSI */ 247/*0x19*/5, /* second SCSI */
248/*0x1a*/0, /* UNKNOWN */ 248/*0x1a*/0, /* UNKNOWN */
249/*0x1b*/0, /* UNKNOWN */ 249/*0x1b*/0, /* UNKNOWN */
250/*0x1c*/8, /* Parallel */ 250/*0x1c*/8, /* Parallel */
@@ -254,7 +254,7 @@ static unsigned char schizo_pil_table[] = {
254/*0x20*/13, /* Audio Record */ 254/*0x20*/13, /* Audio Record */
255/*0x21*/14, /* Audio Playback */ 255/*0x21*/14, /* Audio Playback */
256/*0x22*/12, /* Serial */ 256/*0x22*/12, /* Serial */
257/*0x23*/4, /* EBUS I2C */ 257/*0x23*/5, /* EBUS I2C */
258/*0x24*/10, /* RTC Clock */ 258/*0x24*/10, /* RTC Clock */
259/*0x25*/11, /* Floppy */ 259/*0x25*/11, /* Floppy */
260/*0x26*/0, /* UNKNOWN */ 260/*0x26*/0, /* UNKNOWN */
@@ -296,11 +296,11 @@ static int schizo_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
296 296
297 ret = schizo_pil_table[ino]; 297 ret = schizo_pil_table[ino];
298 if (ret == 0 && pdev == NULL) { 298 if (ret == 0 && pdev == NULL) {
299 ret = 4; 299 ret = 5;
300 } else if (ret == 0) { 300 } else if (ret == 0) {
301 switch ((pdev->class >> 16) & 0xff) { 301 switch ((pdev->class >> 16) & 0xff) {
302 case PCI_BASE_CLASS_STORAGE: 302 case PCI_BASE_CLASS_STORAGE:
303 ret = 4; 303 ret = 5;
304 break; 304 break;
305 305
306 case PCI_BASE_CLASS_NETWORK: 306 case PCI_BASE_CLASS_NETWORK:
@@ -319,7 +319,7 @@ static int schizo_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
319 break; 319 break;
320 320
321 default: 321 default:
322 ret = 4; 322 ret = 5;
323 break; 323 break;
324 }; 324 };
325 } 325 }
@@ -1525,7 +1525,7 @@ static void pbm_config_busmastering(struct pci_pbm_info *pbm)
1525static void pbm_scan_bus(struct pci_controller_info *p, 1525static void pbm_scan_bus(struct pci_controller_info *p,
1526 struct pci_pbm_info *pbm) 1526 struct pci_pbm_info *pbm)
1527{ 1527{
1528 struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL); 1528 struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
1529 1529
1530 if (!cookie) { 1530 if (!cookie) {
1531 prom_printf("%s: Critical allocation failure.\n", pbm->name); 1531 prom_printf("%s: Critical allocation failure.\n", pbm->name);
@@ -1533,7 +1533,6 @@ static void pbm_scan_bus(struct pci_controller_info *p,
1533 } 1533 }
1534 1534
1535 /* All we care about is the PBM. */ 1535 /* All we care about is the PBM. */
1536 memset(cookie, 0, sizeof(*cookie));
1537 cookie->pbm = pbm; 1536 cookie->pbm = pbm;
1538 1537
1539 pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, 1538 pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno,
@@ -2120,27 +2119,24 @@ static void __schizo_init(int node, char *model_name, int chip_type)
2120 } 2119 }
2121 } 2120 }
2122 2121
2123 p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC); 2122 p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
2124 if (!p) { 2123 if (!p) {
2125 prom_printf("SCHIZO: Fatal memory allocation error.\n"); 2124 prom_printf("SCHIZO: Fatal memory allocation error.\n");
2126 prom_halt(); 2125 prom_halt();
2127 } 2126 }
2128 memset(p, 0, sizeof(*p));
2129 2127
2130 iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC); 2128 iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
2131 if (!iommu) { 2129 if (!iommu) {
2132 prom_printf("SCHIZO: Fatal memory allocation error.\n"); 2130 prom_printf("SCHIZO: Fatal memory allocation error.\n");
2133 prom_halt(); 2131 prom_halt();
2134 } 2132 }
2135 memset(iommu, 0, sizeof(*iommu));
2136 p->pbm_A.iommu = iommu; 2133 p->pbm_A.iommu = iommu;
2137 2134
2138 iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC); 2135 iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
2139 if (!iommu) { 2136 if (!iommu) {
2140 prom_printf("SCHIZO: Fatal memory allocation error.\n"); 2137 prom_printf("SCHIZO: Fatal memory allocation error.\n");
2141 prom_halt(); 2138 prom_halt();
2142 } 2139 }
2143 memset(iommu, 0, sizeof(*iommu));
2144 p->pbm_B.iommu = iommu; 2140 p->pbm_B.iommu = iommu;
2145 2141
2146 p->next = pci_controller_root; 2142 p->next = pci_controller_root;
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
new file mode 100644
index 000000000000..9372d4f376d5
--- /dev/null
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -0,0 +1,1147 @@
1/* pci_sun4v.c: SUN4V specific PCI controller support.
2 *
3 * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
4 */
5
6#include <linux/kernel.h>
7#include <linux/types.h>
8#include <linux/pci.h>
9#include <linux/init.h>
10#include <linux/slab.h>
11#include <linux/interrupt.h>
12#include <linux/percpu.h>
13
14#include <asm/pbm.h>
15#include <asm/iommu.h>
16#include <asm/irq.h>
17#include <asm/upa.h>
18#include <asm/pstate.h>
19#include <asm/oplib.h>
20#include <asm/hypervisor.h>
21
22#include "pci_impl.h"
23#include "iommu_common.h"
24
25#include "pci_sun4v.h"
26
27#define PGLIST_NENTS (PAGE_SIZE / sizeof(u64))
28
29struct pci_iommu_batch {
30 struct pci_dev *pdev; /* Device mapping is for. */
31 unsigned long prot; /* IOMMU page protections */
32 unsigned long entry; /* Index into IOTSB. */
33 u64 *pglist; /* List of physical pages */
34 unsigned long npages; /* Number of pages in list. */
35};
36
37static DEFINE_PER_CPU(struct pci_iommu_batch, pci_iommu_batch);
38
39/* Interrupts must be disabled. */
40static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long prot, unsigned long entry)
41{
42 struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
43
44 p->pdev = pdev;
45 p->prot = prot;
46 p->entry = entry;
47 p->npages = 0;
48}
49
50/* Interrupts must be disabled. */
51static long pci_iommu_batch_flush(struct pci_iommu_batch *p)
52{
53 struct pcidev_cookie *pcp = p->pdev->sysdata;
54 unsigned long devhandle = pcp->pbm->devhandle;
55 unsigned long prot = p->prot;
56 unsigned long entry = p->entry;
57 u64 *pglist = p->pglist;
58 unsigned long npages = p->npages;
59
60 while (npages != 0) {
61 long num;
62
63 num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
64 npages, prot, __pa(pglist));
65 if (unlikely(num < 0)) {
66 if (printk_ratelimit())
67 printk("pci_iommu_batch_flush: IOMMU map of "
68 "[%08lx:%08lx:%lx:%lx:%lx] failed with "
69 "status %ld\n",
70 devhandle, HV_PCI_TSBID(0, entry),
71 npages, prot, __pa(pglist), num);
72 return -1;
73 }
74
75 entry += num;
76 npages -= num;
77 pglist += num;
78 }
79
80 p->entry = entry;
81 p->npages = 0;
82
83 return 0;
84}
85
86/* Interrupts must be disabled. */
87static inline long pci_iommu_batch_add(u64 phys_page)
88{
89 struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
90
91 BUG_ON(p->npages >= PGLIST_NENTS);
92
93 p->pglist[p->npages++] = phys_page;
94 if (p->npages == PGLIST_NENTS)
95 return pci_iommu_batch_flush(p);
96
97 return 0;
98}
99
100/* Interrupts must be disabled. */
101static inline long pci_iommu_batch_end(void)
102{
103 struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
104
105 BUG_ON(p->npages >= PGLIST_NENTS);
106
107 return pci_iommu_batch_flush(p);
108}
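/* Usage sketch (illustrative, mirroring pci_4v_alloc_consistent() and
 * pci_4v_map_single() below): callers disable interrupts, then bracket a
 * run of per-page additions so that up to PGLIST_NENTS physical pages are
 * handed to pci_sun4v_iommu_map() in a single hypervisor call:
 *
 *      local_irq_save(flags);
 *      pci_iommu_batch_start(pdev, prot, entry);
 *      for (i = 0; i < npages; i++)
 *              if (pci_iommu_batch_add(base_paddr + i * IO_PAGE_SIZE) < 0)
 *                      goto iommu_map_fail;
 *      if (pci_iommu_batch_end() < 0)
 *              goto iommu_map_fail;
 *      local_irq_restore(flags);
 */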
109
110static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages)
111{
112 unsigned long n, i, start, end, limit;
113 int pass;
114
115 limit = arena->limit;
116 start = arena->hint;
117 pass = 0;
118
119again:
120 n = find_next_zero_bit(arena->map, limit, start);
121 end = n + npages;
122 if (unlikely(end >= limit)) {
123 if (likely(pass < 1)) {
124 limit = start;
125 start = 0;
126 pass++;
127 goto again;
128 } else {
129 /* Scanned the whole thing, give up. */
130 return -1;
131 }
132 }
133
134 for (i = n; i < end; i++) {
135 if (test_bit(i, arena->map)) {
136 start = i + 1;
137 goto again;
138 }
139 }
140
141 for (i = n; i < end; i++)
142 __set_bit(i, arena->map);
143
144 arena->hint = end;
145
146 return n;
147}
148
149static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
150{
151 unsigned long i;
152
153 for (i = base; i < (base + npages); i++)
154 __clear_bit(i, arena->map);
155}
156
157static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
158{
159 struct pcidev_cookie *pcp;
160 struct pci_iommu *iommu;
161 unsigned long flags, order, first_page, npages, n;
162 void *ret;
163 long entry;
164
165 size = IO_PAGE_ALIGN(size);
166 order = get_order(size);
167 if (unlikely(order >= MAX_ORDER))
168 return NULL;
169
170 npages = size >> IO_PAGE_SHIFT;
171
172 first_page = __get_free_pages(GFP_ATOMIC, order);
173 if (unlikely(first_page == 0UL))
174 return NULL;
175
176 memset((char *)first_page, 0, PAGE_SIZE << order);
177
178 pcp = pdev->sysdata;
179 iommu = pcp->pbm->iommu;
180
181 spin_lock_irqsave(&iommu->lock, flags);
182 entry = pci_arena_alloc(&iommu->arena, npages);
183 spin_unlock_irqrestore(&iommu->lock, flags);
184
185 if (unlikely(entry < 0L))
186 goto arena_alloc_fail;
187
188 *dma_addrp = (iommu->page_table_map_base +
189 (entry << IO_PAGE_SHIFT));
190 ret = (void *) first_page;
191 first_page = __pa(first_page);
192
193 local_irq_save(flags);
194
195 pci_iommu_batch_start(pdev,
196 (HV_PCI_MAP_ATTR_READ |
197 HV_PCI_MAP_ATTR_WRITE),
198 entry);
199
200 for (n = 0; n < npages; n++) {
201 long err = pci_iommu_batch_add(first_page + (n * PAGE_SIZE));
202 if (unlikely(err < 0L))
203 goto iommu_map_fail;
204 }
205
206 if (unlikely(pci_iommu_batch_end() < 0L))
207 goto iommu_map_fail;
208
209 local_irq_restore(flags);
210
211 return ret;
212
213iommu_map_fail:
214 /* Interrupts are disabled. */
215 spin_lock(&iommu->lock);
216 pci_arena_free(&iommu->arena, entry, npages);
217 spin_unlock_irqrestore(&iommu->lock, flags);
218
219arena_alloc_fail:
220 free_pages(first_page, order);
221 return NULL;
222}
223
224static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
225{
226 struct pcidev_cookie *pcp;
227 struct pci_iommu *iommu;
228 unsigned long flags, order, npages, entry;
229 u32 devhandle;
230
231 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
232 pcp = pdev->sysdata;
233 iommu = pcp->pbm->iommu;
234 devhandle = pcp->pbm->devhandle;
235 entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
236
237 spin_lock_irqsave(&iommu->lock, flags);
238
239 pci_arena_free(&iommu->arena, entry, npages);
240
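/* pci_sun4v_iommu_demap() returns the number of entries actually
 * demapped, which may be fewer than requested, so loop until the
 * whole range has been torn down.
 */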
241 do {
242 unsigned long num;
243
244 num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
245 npages);
246 entry += num;
247 npages -= num;
248 } while (npages != 0);
249
250 spin_unlock_irqrestore(&iommu->lock, flags);
251
252 order = get_order(size);
253 if (order < 10)
254 free_pages((unsigned long)cpu, order);
255}
256
257static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
258{
259 struct pcidev_cookie *pcp;
260 struct pci_iommu *iommu;
261 unsigned long flags, npages, oaddr;
262 unsigned long i, base_paddr;
263 u32 bus_addr, ret;
264 unsigned long prot;
265 long entry;
266
267 pcp = pdev->sysdata;
268 iommu = pcp->pbm->iommu;
269
270 if (unlikely(direction == PCI_DMA_NONE))
271 goto bad;
272
273 oaddr = (unsigned long)ptr;
274 npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
275 npages >>= IO_PAGE_SHIFT;
276
277 spin_lock_irqsave(&iommu->lock, flags);
278 entry = pci_arena_alloc(&iommu->arena, npages);
279 spin_unlock_irqrestore(&iommu->lock, flags);
280
281 if (unlikely(entry < 0L))
282 goto bad;
283
284 bus_addr = (iommu->page_table_map_base +
285 (entry << IO_PAGE_SHIFT));
286 ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
287 base_paddr = __pa(oaddr & IO_PAGE_MASK);
288 prot = HV_PCI_MAP_ATTR_READ;
289 if (direction != PCI_DMA_TODEVICE)
290 prot |= HV_PCI_MAP_ATTR_WRITE;
291
292 local_irq_save(flags);
293
294 pci_iommu_batch_start(pdev, prot, entry);
295
296 for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
297 long err = pci_iommu_batch_add(base_paddr);
298 if (unlikely(err < 0L))
299 goto iommu_map_fail;
300 }
301 if (unlikely(pci_iommu_batch_end() < 0L))
302 goto iommu_map_fail;
303
304 local_irq_restore(flags);
305
306 return ret;
307
308bad:
309 if (printk_ratelimit())
310 WARN_ON(1);
311 return PCI_DMA_ERROR_CODE;
312
313iommu_map_fail:
314 /* Interrupts are disabled. */
315 spin_lock(&iommu->lock);
316 pci_arena_free(&iommu->arena, entry, npages);
317 spin_unlock_irqrestore(&iommu->lock, flags);
318
319 return PCI_DMA_ERROR_CODE;
320}
321
322static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
323{
324 struct pcidev_cookie *pcp;
325 struct pci_iommu *iommu;
326 unsigned long flags, npages;
327 long entry;
328 u32 devhandle;
329
330 if (unlikely(direction == PCI_DMA_NONE)) {
331 if (printk_ratelimit())
332 WARN_ON(1);
333 return;
334 }
335
336 pcp = pdev->sysdata;
337 iommu = pcp->pbm->iommu;
338 devhandle = pcp->pbm->devhandle;
339
340 npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
341 npages >>= IO_PAGE_SHIFT;
342 bus_addr &= IO_PAGE_MASK;
343
344 spin_lock_irqsave(&iommu->lock, flags);
345
346 entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
347 pci_arena_free(&iommu->arena, entry, npages);
348
349 do {
350 unsigned long num;
351
352 num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
353 npages);
354 entry += num;
355 npages -= num;
356 } while (npages != 0);
357
358 spin_unlock_irqrestore(&iommu->lock, flags);
359}
360
361#define SG_ENT_PHYS_ADDRESS(SG) \
362 (__pa(page_address((SG)->page)) + (SG)->offset)
363
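/* Populate IOMMU entries for a scatterlist that prepare_sg() has
 * already coalesced into 'nused' DMA segments.  Physical pages are
 * fed to the mapping batcher one IO page at a time; runs that stay
 * within an IO page are merged, and a new mapping is started at
 * every IO page crossing.
 */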
364static inline long fill_sg(long entry, struct pci_dev *pdev,
365 struct scatterlist *sg,
366 int nused, int nelems, unsigned long prot)
367{
368 struct scatterlist *dma_sg = sg;
369 struct scatterlist *sg_end = sg + nelems;
370 unsigned long flags;
371 int i;
372
373 local_irq_save(flags);
374
375 pci_iommu_batch_start(pdev, prot, entry);
376
377 for (i = 0; i < nused; i++) {
378 unsigned long pteval = ~0UL;
379 u32 dma_npages;
380
381 dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
382 dma_sg->dma_length +
383 ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
384 do {
385 unsigned long offset;
386 signed int len;
387
388 /* If we are here, we know we have at least one
389 * more page to map. So walk forward until we
390 * hit a page crossing, and begin creating new
391 * mappings from that spot.
392 */
393 for (;;) {
394 unsigned long tmp;
395
396 tmp = SG_ENT_PHYS_ADDRESS(sg);
397 len = sg->length;
398 if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
399 pteval = tmp & IO_PAGE_MASK;
400 offset = tmp & (IO_PAGE_SIZE - 1UL);
401 break;
402 }
403 if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
404 pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
405 offset = 0UL;
406 len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
407 break;
408 }
409 sg++;
410 }
411
412 pteval = (pteval & IOPTE_PAGE);
413 while (len > 0) {
414 long err;
415
416 err = pci_iommu_batch_add(pteval);
417 if (unlikely(err < 0L))
418 goto iommu_map_failed;
419
420 pteval += IO_PAGE_SIZE;
421 len -= (IO_PAGE_SIZE - offset);
422 offset = 0;
423 dma_npages--;
424 }
425
426 pteval = (pteval & IOPTE_PAGE) + len;
427 sg++;
428
429 /* Skip over any tail mappings we've fully mapped,
430 * adjusting pteval along the way. Stop when we
431 * detect a page crossing event.
432 */
433 while (sg < sg_end &&
434 (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
435 (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
436 ((pteval ^
437 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
438 pteval += sg->length;
439 sg++;
440 }
441 if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
442 pteval = ~0UL;
443 } while (dma_npages != 0);
444 dma_sg++;
445 }
446
447 if (unlikely(pci_iommu_batch_end() < 0L))
448 goto iommu_map_failed;
449
450 local_irq_restore(flags);
451 return 0;
452
453iommu_map_failed:
454 local_irq_restore(flags);
455 return -1L;
456}
457
458static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
459{
460 struct pcidev_cookie *pcp;
461 struct pci_iommu *iommu;
462 unsigned long flags, npages, prot;
463 u32 dma_base;
464 struct scatterlist *sgtmp;
465 long entry, err;
466 int used;
467
468 /* Fast path single entry scatterlists. */
469 if (nelems == 1) {
470 sglist->dma_address =
471 pci_4v_map_single(pdev,
472 (page_address(sglist->page) + sglist->offset),
473 sglist->length, direction);
474 if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
475 return 0;
476 sglist->dma_length = sglist->length;
477 return 1;
478 }
479
480 pcp = pdev->sysdata;
481 iommu = pcp->pbm->iommu;
482
483 if (unlikely(direction == PCI_DMA_NONE))
484 goto bad;
485
486 /* Step 1: Prepare scatter list. */
487 npages = prepare_sg(sglist, nelems);
488
489 /* Step 2: Allocate a cluster and context, if necessary. */
490 spin_lock_irqsave(&iommu->lock, flags);
491 entry = pci_arena_alloc(&iommu->arena, npages);
492 spin_unlock_irqrestore(&iommu->lock, flags);
493
494 if (unlikely(entry < 0L))
495 goto bad;
496
497 dma_base = iommu->page_table_map_base +
498 (entry << IO_PAGE_SHIFT);
499
500 /* Step 3: Normalize DMA addresses. */
501 used = nelems;
502
503 sgtmp = sglist;
504 while (used && sgtmp->dma_length) {
505 sgtmp->dma_address += dma_base;
506 sgtmp++;
507 used--;
508 }
509 used = nelems - used;
510
511 /* Step 4: Create the mappings. */
512 prot = HV_PCI_MAP_ATTR_READ;
513 if (direction != PCI_DMA_TODEVICE)
514 prot |= HV_PCI_MAP_ATTR_WRITE;
515
516 err = fill_sg(entry, pdev, sglist, used, nelems, prot);
517 if (unlikely(err < 0L))
518 goto iommu_map_failed;
519
520 return used;
521
522bad:
523 if (printk_ratelimit())
524 WARN_ON(1);
525 return 0;
526
527iommu_map_failed:
528 spin_lock_irqsave(&iommu->lock, flags);
529 pci_arena_free(&iommu->arena, entry, npages);
530 spin_unlock_irqrestore(&iommu->lock, flags);
531
532 return 0;
533}
534
535static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
536{
537 struct pcidev_cookie *pcp;
538 struct pci_iommu *iommu;
539 unsigned long flags, i, npages;
540 long entry;
541 u32 devhandle, bus_addr;
542
543 if (unlikely(direction == PCI_DMA_NONE)) {
544 if (printk_ratelimit())
545 WARN_ON(1);
546 }
547
548 pcp = pdev->sysdata;
549 iommu = pcp->pbm->iommu;
550 devhandle = pcp->pbm->devhandle;
551
552 bus_addr = sglist->dma_address & IO_PAGE_MASK;
553
554 for (i = 1; i < nelems; i++)
555 if (sglist[i].dma_length == 0)
556 break;
557 i--;
558 npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
559 bus_addr) >> IO_PAGE_SHIFT;
560
561 entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
562
563 spin_lock_irqsave(&iommu->lock, flags);
564
565 pci_arena_free(&iommu->arena, entry, npages);
566
567 do {
568 unsigned long num;
569
570 num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
571 npages);
572 entry += num;
573 npages -= num;
574 } while (npages != 0);
575
576 spin_unlock_irqrestore(&iommu->lock, flags);
577}
578
579static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
580{
581 /* Nothing to do... */
582}
583
584static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
585{
586 /* Nothing to do... */
587}
588
589struct pci_iommu_ops pci_sun4v_iommu_ops = {
590 .alloc_consistent = pci_4v_alloc_consistent,
591 .free_consistent = pci_4v_free_consistent,
592 .map_single = pci_4v_map_single,
593 .unmap_single = pci_4v_unmap_single,
594 .map_sg = pci_4v_map_sg,
595 .unmap_sg = pci_4v_unmap_sg,
596 .dma_sync_single_for_cpu = pci_4v_dma_sync_single_for_cpu,
597 .dma_sync_sg_for_cpu = pci_4v_dma_sync_sg_for_cpu,
598};
599
600/* SUN4V PCI configuration space accessors. */
601
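/* On the PBM's own bus only the host bridge itself (device 0,
 * function 0) is addressable; any other device/function there, or a
 * bus outside the PBM's bus range, is treated as out of range and a
 * config read returns all ones.
 */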
602static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func)
603{
604 if (bus == pbm->pci_first_busno) {
605 if (device == 0 && func == 0)
606 return 0;
607 return 1;
608 }
609
610 if (bus < pbm->pci_first_busno ||
611 bus > pbm->pci_last_busno)
612 return 1;
613 return 0;
614}
615
616static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
617 int where, int size, u32 *value)
618{
619 struct pci_pbm_info *pbm = bus_dev->sysdata;
620 u32 devhandle = pbm->devhandle;
621 unsigned int bus = bus_dev->number;
622 unsigned int device = PCI_SLOT(devfn);
623 unsigned int func = PCI_FUNC(devfn);
624 unsigned long ret;
625
626 if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
627 ret = ~0UL;
628 } else {
629 ret = pci_sun4v_config_get(devhandle,
630 HV_PCI_DEVICE_BUILD(bus, device, func),
631 where, size);
632#if 0
633 printk("rcfg: [%x:%x:%x:%d]=[%lx]\n",
634 devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
635 where, size, ret);
636#endif
637 }
638 switch (size) {
639 case 1:
640 *value = ret & 0xff;
641 break;
642 case 2:
643 *value = ret & 0xffff;
644 break;
645 case 4:
646 *value = ret & 0xffffffff;
647 break;
648 };
649
650
651 return PCIBIOS_SUCCESSFUL;
652}
653
654static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
655 int where, int size, u32 value)
656{
657 struct pci_pbm_info *pbm = bus_dev->sysdata;
658 u32 devhandle = pbm->devhandle;
659 unsigned int bus = bus_dev->number;
660 unsigned int device = PCI_SLOT(devfn);
661 unsigned int func = PCI_FUNC(devfn);
662 unsigned long ret;
663
664 if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
665 /* Do nothing. */
666 } else {
667 ret = pci_sun4v_config_put(devhandle,
668 HV_PCI_DEVICE_BUILD(bus, device, func),
669 where, size, value);
670#if 0
671 printk("wcfg: [%x:%x:%x:%d] v[%x] == [%lx]\n",
672 devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
673 where, size, value, ret);
674#endif
675 }
676 return PCIBIOS_SUCCESSFUL;
677}
678
679static struct pci_ops pci_sun4v_ops = {
680 .read = pci_sun4v_read_pci_cfg,
681 .write = pci_sun4v_write_pci_cfg,
682};
683
684
685static void pbm_scan_bus(struct pci_controller_info *p,
686 struct pci_pbm_info *pbm)
687{
688 struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL);
689
690 if (!cookie) {
691 prom_printf("%s: Critical allocation failure.\n", pbm->name);
692 prom_halt();
693 }
694
695 /* All we care about is the PBM. */
696 memset(cookie, 0, sizeof(*cookie));
697 cookie->pbm = pbm;
698
699 pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, p->pci_ops, pbm);
700#if 0
701 pci_fixup_host_bridge_self(pbm->pci_bus);
702 pbm->pci_bus->self->sysdata = cookie;
703#endif
704 pci_fill_in_pbm_cookies(pbm->pci_bus, pbm,
705 pbm->prom_node);
706 pci_record_assignments(pbm, pbm->pci_bus);
707 pci_assign_unassigned(pbm, pbm->pci_bus);
708 pci_fixup_irq(pbm, pbm->pci_bus);
709 pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
710 pci_setup_busmastering(pbm, pbm->pci_bus);
711}
712
713static void pci_sun4v_scan_bus(struct pci_controller_info *p)
714{
715 if (p->pbm_A.prom_node) {
716 p->pbm_A.is_66mhz_capable =
717 prom_getbool(p->pbm_A.prom_node, "66mhz-capable");
718
719 pbm_scan_bus(p, &p->pbm_A);
720 }
721 if (p->pbm_B.prom_node) {
722 p->pbm_B.is_66mhz_capable =
723 prom_getbool(p->pbm_B.prom_node, "66mhz-capable");
724
725 pbm_scan_bus(p, &p->pbm_B);
726 }
727
728 /* XXX register error interrupt handlers XXX */
729}
730
731static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm,
732 struct pci_dev *pdev,
733 unsigned int devino)
734{
735 u32 devhandle = pbm->devhandle;
736 int pil;
737
738 pil = 5;
739 if (pdev) {
740 switch ((pdev->class >> 16) & 0xff) {
741 case PCI_BASE_CLASS_STORAGE:
742 pil = 5;
743 break;
744
745 case PCI_BASE_CLASS_NETWORK:
746 pil = 6;
747 break;
748
749 case PCI_BASE_CLASS_DISPLAY:
750 pil = 9;
751 break;
752
753 case PCI_BASE_CLASS_MULTIMEDIA:
754 case PCI_BASE_CLASS_MEMORY:
755 case PCI_BASE_CLASS_BRIDGE:
756 case PCI_BASE_CLASS_SERIAL:
757 pil = 10;
758 break;
759
760 default:
761 pil = 5;
762 break;
763 };
764 }
765 BUG_ON(PIL_RESERVED(pil));
766
767 return sun4v_build_irq(devhandle, devino, pil, IBF_PCI);
768}
769
770static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource)
771{
772 struct pcidev_cookie *pcp = pdev->sysdata;
773 struct pci_pbm_info *pbm = pcp->pbm;
774 struct resource *res, *root;
775 u32 reg;
776 int where, size, is_64bit;
777
778 res = &pdev->resource[resource];
779 if (resource < 6) {
780 where = PCI_BASE_ADDRESS_0 + (resource * 4);
781 } else if (resource == PCI_ROM_RESOURCE) {
782 where = pdev->rom_base_reg;
783 } else {
784 /* Someone might have requested allocation of a non-standard resource */
785 return;
786 }
787
788 /* XXX 64-bit MEM handling is not 100% correct... XXX */
789 is_64bit = 0;
790 if (res->flags & IORESOURCE_IO)
791 root = &pbm->io_space;
792 else {
793 root = &pbm->mem_space;
794 if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
795 == PCI_BASE_ADDRESS_MEM_TYPE_64)
796 is_64bit = 1;
797 }
798
799 size = res->end - res->start;
800 pci_read_config_dword(pdev, where, &reg);
801 reg = ((reg & size) |
802 (((u32)(res->start - root->start)) & ~size));
803 if (resource == PCI_ROM_RESOURCE) {
804 reg |= PCI_ROM_ADDRESS_ENABLE;
805 res->flags |= IORESOURCE_ROM_ENABLE;
806 }
807 pci_write_config_dword(pdev, where, reg);
808
809 /* This knows that the upper 32-bits of the address
810 * must be zero. Our PCI common layer enforces this.
811 */
812 if (is_64bit)
813 pci_write_config_dword(pdev, where + 4, 0);
814}
815
816static void pci_sun4v_resource_adjust(struct pci_dev *pdev,
817 struct resource *res,
818 struct resource *root)
819{
820 res->start += root->start;
821 res->end += root->start;
822}
823
824/* Use ranges property to determine where PCI MEM, I/O, and Config
825 * space are for this PCI bus module.
826 */
827static void pci_sun4v_determine_mem_io_space(struct pci_pbm_info *pbm)
828{
829 int i, saw_mem, saw_io;
830
831 saw_mem = saw_io = 0;
832 for (i = 0; i < pbm->num_pbm_ranges; i++) {
833 struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
834 unsigned long a;
835 int type;
836
837 type = (pr->child_phys_hi >> 24) & 0x3;
838 a = (((unsigned long)pr->parent_phys_hi << 32UL) |
839 ((unsigned long)pr->parent_phys_lo << 0UL));
840
841 switch (type) {
842 case 1:
843 /* 16-bit IO space, 16MB */
844 pbm->io_space.start = a;
845 pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
846 pbm->io_space.flags = IORESOURCE_IO;
847 saw_io = 1;
848 break;
849
850 case 2:
851 /* 32-bit MEM space, 2GB */
852 pbm->mem_space.start = a;
853 pbm->mem_space.end = a + (0x80000000UL - 1UL);
854 pbm->mem_space.flags = IORESOURCE_MEM;
855 saw_mem = 1;
856 break;
857
858 case 3:
859 /* XXX 64-bit MEM handling XXX */
860
861 default:
862 break;
863 };
864 }
865
866 if (!saw_io || !saw_mem) {
867 prom_printf("%s: Fatal error, missing %s PBM range.\n",
868 pbm->name,
869 (!saw_io ? "IO" : "MEM"));
870 prom_halt();
871 }
872
873 printk("%s: PCI IO[%lx] MEM[%lx]\n",
874 pbm->name,
875 pbm->io_space.start,
876 pbm->mem_space.start);
877}
878
879static void pbm_register_toplevel_resources(struct pci_controller_info *p,
880 struct pci_pbm_info *pbm)
881{
882 pbm->io_space.name = pbm->mem_space.name = pbm->name;
883
884 request_resource(&ioport_resource, &pbm->io_space);
885 request_resource(&iomem_resource, &pbm->mem_space);
886 pci_register_legacy_regions(&pbm->io_space,
887 &pbm->mem_space);
888}
889
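/* Scan the entire IOMMU TSB through the hypervisor and mark any
 * entries that already hold a valid translation (for example,
 * mappings left in place by the firmware) so the arena allocator
 * never hands them out again.
 */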
890static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
891 struct pci_iommu *iommu)
892{
893 struct pci_iommu_arena *arena = &iommu->arena;
894 unsigned long i, cnt = 0;
895 u32 devhandle;
896
897 devhandle = pbm->devhandle;
898 for (i = 0; i < arena->limit; i++) {
899 unsigned long ret, io_attrs, ra;
900
901 ret = pci_sun4v_iommu_getmap(devhandle,
902 HV_PCI_TSBID(0, i),
903 &io_attrs, &ra);
904 if (ret == HV_EOK) {
905 cnt++;
906 __set_bit(i, arena->map);
907 }
908 }
909
910 return cnt;
911}
912
913static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
914{
915 struct pci_iommu *iommu = pbm->iommu;
916 unsigned long num_tsb_entries, sz;
917 u32 vdma[2], dma_mask, dma_offset;
918 int err, tsbsize;
919
920 err = prom_getproperty(pbm->prom_node, "virtual-dma",
921 (char *)&vdma[0], sizeof(vdma));
922 if (err == 0 || err == -1) {
923 /* No property, use default values. */
924 vdma[0] = 0x80000000;
925 vdma[1] = 0x80000000;
926 }
927
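/* The "virtual-dma" property gives the base and size of the DVMA
 * window.  dma_mask covers the whole window, and the TSB is sized
 * to one 8-byte entry per 8K IO page within it.
 */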
928 dma_mask = vdma[0];
929 switch (vdma[1]) {
930 case 0x20000000:
931 dma_mask |= 0x1fffffff;
932 tsbsize = 64;
933 break;
934
935 case 0x40000000:
936 dma_mask |= 0x3fffffff;
937 tsbsize = 128;
938 break;
939
940 case 0x80000000:
941 dma_mask |= 0x7fffffff;
942 tsbsize = 256;
943 break;
944
945 default:
946 prom_printf("PCI-SUN4V: strange virtual-dma size.\n");
947 prom_halt();
948 };
949
950 tsbsize *= (8 * 1024);
951
952 num_tsb_entries = tsbsize / sizeof(iopte_t);
953
954 dma_offset = vdma[0];
955
956 /* Setup initial software IOMMU state. */
957 spin_lock_init(&iommu->lock);
958 iommu->ctx_lowest_free = 1;
959 iommu->page_table_map_base = dma_offset;
960 iommu->dma_addr_mask = dma_mask;
961
962 /* Allocate and initialize the free area map. */
963 sz = num_tsb_entries / 8;
964 sz = (sz + 7UL) & ~7UL;
965 iommu->arena.map = kmalloc(sz, GFP_KERNEL);
966 if (!iommu->arena.map) {
967 prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
968 prom_halt();
969 }
970 memset(iommu->arena.map, 0, sz);
971 iommu->arena.limit = num_tsb_entries;
972
973 sz = probe_existing_entries(pbm, iommu);
974
975 printk("%s: TSB entries [%lu], existing mappings [%lu]\n",
976 pbm->name, num_tsb_entries, sz);
977}
978
979static void pci_sun4v_get_bus_range(struct pci_pbm_info *pbm)
980{
981 unsigned int busrange[2];
982 int prom_node = pbm->prom_node;
983 int err;
984
985 err = prom_getproperty(prom_node, "bus-range",
986 (char *)&busrange[0],
987 sizeof(busrange));
988 if (err == 0 || err == -1) {
989 prom_printf("%s: Fatal error, no bus-range.\n", pbm->name);
990 prom_halt();
991 }
992
993 pbm->pci_first_busno = busrange[0];
994 pbm->pci_last_busno = busrange[1];
995
996}
997
998static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node, u32 devhandle)
999{
1000 struct pci_pbm_info *pbm;
1001 int err, i;
1002
1003 if (devhandle & 0x40)
1004 pbm = &p->pbm_B;
1005 else
1006 pbm = &p->pbm_A;
1007
1008 pbm->parent = p;
1009 pbm->prom_node = prom_node;
1010 pbm->pci_first_slot = 1;
1011
1012 pbm->devhandle = devhandle;
1013
1014 sprintf(pbm->name, "SUN4V-PCI%d PBM%c",
1015 p->index, (pbm == &p->pbm_A ? 'A' : 'B'));
1016
1017 printk("%s: devhandle[%x] prom_node[%x:%x]\n",
1018 pbm->name, pbm->devhandle,
1019 pbm->prom_node, prom_getchild(pbm->prom_node));
1020
1021 prom_getstring(prom_node, "name",
1022 pbm->prom_name, sizeof(pbm->prom_name));
1023
1024 err = prom_getproperty(prom_node, "ranges",
1025 (char *) pbm->pbm_ranges,
1026 sizeof(pbm->pbm_ranges));
1027 if (err == 0 || err == -1) {
1028 prom_printf("%s: Fatal error, no ranges property.\n",
1029 pbm->name);
1030 prom_halt();
1031 }
1032
1033 pbm->num_pbm_ranges =
1034 (err / sizeof(struct linux_prom_pci_ranges));
1035
1036 /* Mask out the top 4 bits of the ranges, leaving the real
1037 * physical address.
1038 */
1039 for (i = 0; i < pbm->num_pbm_ranges; i++)
1040 pbm->pbm_ranges[i].parent_phys_hi &= 0x0fffffff;
1041
1042 pci_sun4v_determine_mem_io_space(pbm);
1043 pbm_register_toplevel_resources(p, pbm);
1044
1045 err = prom_getproperty(prom_node, "interrupt-map",
1046 (char *)pbm->pbm_intmap,
1047 sizeof(pbm->pbm_intmap));
1048 if (err == 0 || err == -1) {
1049 prom_printf("%s: Fatal error, no interrupt-map property.\n",
1050 pbm->name);
1051 prom_halt();
1052 }
1053
1054 pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap));
1055 err = prom_getproperty(prom_node, "interrupt-map-mask",
1056 (char *)&pbm->pbm_intmask,
1057 sizeof(pbm->pbm_intmask));
1058 if (err == 0 || err == -1) {
1059 prom_printf("%s: Fatal error, no interrupt-map-mask.\n",
1060 pbm->name);
1061 prom_halt();
1062 }
1063
1064 pci_sun4v_get_bus_range(pbm);
1065 pci_sun4v_iommu_init(pbm);
1066}
1067
1068void sun4v_pci_init(int node, char *model_name)
1069{
1070 struct pci_controller_info *p;
1071 struct pci_iommu *iommu;
1072 struct linux_prom64_registers regs;
1073 u32 devhandle;
1074 int i;
1075
1076 prom_getproperty(node, "reg", (char *)&regs, sizeof(regs));
1077 devhandle = (regs.phys_addr >> 32UL) & 0x0fffffff;
1078
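/* Two PBMs whose devhandles differ only in bit 0x40 are the A and B
 * halves of the same controller.  If the partner PBM was probed
 * earlier, attach this one to the existing pci_controller_info
 * instead of allocating a new one.
 */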
1079 for (p = pci_controller_root; p; p = p->next) {
1080 struct pci_pbm_info *pbm;
1081
1082 if (p->pbm_A.prom_node && p->pbm_B.prom_node)
1083 continue;
1084
1085 pbm = (p->pbm_A.prom_node ?
1086 &p->pbm_A :
1087 &p->pbm_B);
1088
1089 if (pbm->devhandle == (devhandle ^ 0x40)) {
1090 pci_sun4v_pbm_init(p, node, devhandle);
1091 return;
1092 }
1093 }
1094
1095 for_each_cpu(i) {
1096 unsigned long page = get_zeroed_page(GFP_ATOMIC);
1097
1098 if (!page)
1099 goto fatal_memory_error;
1100
1101 per_cpu(pci_iommu_batch, i).pglist = (u64 *) page;
1102 }
1103
1104 p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
1105 if (!p)
1106 goto fatal_memory_error;
1107
1108 memset(p, 0, sizeof(*p));
1109
1110 iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
1111 if (!iommu)
1112 goto fatal_memory_error;
1113
1114 memset(iommu, 0, sizeof(*iommu));
1115 p->pbm_A.iommu = iommu;
1116
1117 iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
1118 if (!iommu)
1119 goto fatal_memory_error;
1120
1121 memset(iommu, 0, sizeof(*iommu));
1122 p->pbm_B.iommu = iommu;
1123
1124 p->next = pci_controller_root;
1125 pci_controller_root = p;
1126
1127 p->index = pci_num_controllers++;
1128 p->pbms_same_domain = 0;
1129
1130 p->scan_bus = pci_sun4v_scan_bus;
1131 p->irq_build = pci_sun4v_irq_build;
1132 p->base_address_update = pci_sun4v_base_address_update;
1133 p->resource_adjust = pci_sun4v_resource_adjust;
1134 p->pci_ops = &pci_sun4v_ops;
1135
1136 /* Like PSYCHO and SCHIZO we have a 2GB aligned area
1137 * for memory space.
1138 */
1139 pci_memspace_mask = 0x7fffffffUL;
1140
1141 pci_sun4v_pbm_init(p, node, devhandle);
1142 return;
1143
1144fatal_memory_error:
1145 prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
1146 prom_halt();
1147}
diff --git a/arch/sparc64/kernel/pci_sun4v.h b/arch/sparc64/kernel/pci_sun4v.h
new file mode 100644
index 000000000000..884d25f6158d
--- /dev/null
+++ b/arch/sparc64/kernel/pci_sun4v.h
@@ -0,0 +1,31 @@
1/* pci_sun4v.h: SUN4V specific PCI controller support.
2 *
3 * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
4 */
5
6#ifndef _PCI_SUN4V_H
7#define _PCI_SUN4V_H
8
9extern long pci_sun4v_iommu_map(unsigned long devhandle,
10 unsigned long tsbid,
11 unsigned long num_ttes,
12 unsigned long io_attributes,
13 unsigned long io_page_list_pa);
14extern unsigned long pci_sun4v_iommu_demap(unsigned long devhandle,
15 unsigned long tsbid,
16 unsigned long num_ttes);
17extern unsigned long pci_sun4v_iommu_getmap(unsigned long devhandle,
18 unsigned long tsbid,
19 unsigned long *io_attributes,
20 unsigned long *real_address);
21extern unsigned long pci_sun4v_config_get(unsigned long devhandle,
22 unsigned long pci_device,
23 unsigned long config_offset,
24 unsigned long size);
25extern int pci_sun4v_config_put(unsigned long devhandle,
26 unsigned long pci_device,
27 unsigned long config_offset,
28 unsigned long size,
29 unsigned long data);
30
31#endif /* !(_PCI_SUN4V_H) */
diff --git a/arch/sparc64/kernel/pci_sun4v_asm.S b/arch/sparc64/kernel/pci_sun4v_asm.S
new file mode 100644
index 000000000000..6604fdbf746c
--- /dev/null
+++ b/arch/sparc64/kernel/pci_sun4v_asm.S
@@ -0,0 +1,95 @@
1/* pci_sun4v_asm: Hypervisor calls for PCI support.
2 *
3 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
4 */
5
6#include <asm/hypervisor.h>
7
8 /* %o0: devhandle
9 * %o1: tsbid
10 * %o2: num ttes
11 * %o3: io_attributes
12 * %o4: io_page_list phys address
13 *
14 * returns %o0: -status if status was non-zero, else
15 * %o0: num pages mapped
16 */
17 .globl pci_sun4v_iommu_map
18pci_sun4v_iommu_map:
19 mov %o5, %g1
20 mov HV_FAST_PCI_IOMMU_MAP, %o5
21 ta HV_FAST_TRAP
22 brnz,pn %o0, 1f
23 sub %g0, %o0, %o0
24 mov %o1, %o0
251: retl
26 nop
27
28 /* %o0: devhandle
29 * %o1: tsbid
30 * %o2: num ttes
31 *
32 * returns %o0: num ttes demapped
33 */
34 .globl pci_sun4v_iommu_demap
35pci_sun4v_iommu_demap:
36 mov HV_FAST_PCI_IOMMU_DEMAP, %o5
37 ta HV_FAST_TRAP
38 retl
39 mov %o1, %o0
40
41 /* %o0: devhandle
42 * %o1: tsbid
43 * %o2: &io_attributes
44 * %o3: &real_address
45 *
46 * returns %o0: status
47 */
48 .globl pci_sun4v_iommu_getmap
49pci_sun4v_iommu_getmap:
50 mov %o2, %o4
51 mov HV_FAST_PCI_IOMMU_GETMAP, %o5
52 ta HV_FAST_TRAP
53 stx %o1, [%o4]
54 stx %o2, [%o3]
55 retl
56 mov %o0, %o0
57
58 /* %o0: devhandle
59 * %o1: pci_device
60 * %o2: pci_config_offset
61 * %o3: size
62 *
63 * returns %o0: data
64 *
65 * If there is an error, the data will be returned
66 * as all 1's.
67 */
68 .globl pci_sun4v_config_get
69pci_sun4v_config_get:
70 mov HV_FAST_PCI_CONFIG_GET, %o5
71 ta HV_FAST_TRAP
72 brnz,a,pn %o1, 1f
73 mov -1, %o2
741: retl
75 mov %o2, %o0
76
77 /* %o0: devhandle
78 * %o1: pci_device
79 * %o2: pci_config_offset
80 * %o3: size
81 * %o4: data
82 *
83 * returns %o0: status
84 *
85 * status will be zero if the operation completed
86 * successfully, else -1
87 */
88 .globl pci_sun4v_config_put
89pci_sun4v_config_put:
90 mov HV_FAST_PCI_CONFIG_PUT, %o5
91 ta HV_FAST_TRAP
92 brnz,a,pn %o1, 1f
93 mov -1, %o1
941: retl
95 mov %o1, %o0
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index 059b0d025224..1c7ca2f712d9 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -44,83 +44,61 @@
44#include <asm/fpumacro.h> 44#include <asm/fpumacro.h>
45#include <asm/head.h> 45#include <asm/head.h>
46#include <asm/cpudata.h> 46#include <asm/cpudata.h>
47#include <asm/mmu_context.h>
47#include <asm/unistd.h> 48#include <asm/unistd.h>
49#include <asm/hypervisor.h>
48 50
49/* #define VERBOSE_SHOWREGS */ 51/* #define VERBOSE_SHOWREGS */
50 52
51/* 53static void sparc64_yield(void)
52 * Nothing special yet...
53 */
54void default_idle(void)
55{
56}
57
58#ifndef CONFIG_SMP
59
60/*
61 * the idle loop on a Sparc... ;)
62 */
63void cpu_idle(void)
64{ 54{
65 /* endless idle loop with no priority at all */ 55 if (tlb_type != hypervisor)
66 for (;;) { 56 return;
67 /* If current->work.need_resched is zero we should really
68 * setup for a system wakup event and execute a shutdown
69 * instruction.
70 *
71 * But this requires writing back the contents of the
72 * L2 cache etc. so implement this later. -DaveM
73 */
74 while (!need_resched())
75 barrier();
76 57
77 preempt_enable_no_resched(); 58 clear_thread_flag(TIF_POLLING_NRFLAG);
78 schedule(); 59 smp_mb__after_clear_bit();
79 preempt_disable(); 60
80 check_pgt_cache(); 61 while (!need_resched()) {
62 unsigned long pstate;
63
64 /* Disable interrupts. */
65 __asm__ __volatile__(
66 "rdpr %%pstate, %0\n\t"
67 "andn %0, %1, %0\n\t"
68 "wrpr %0, %%g0, %%pstate"
69 : "=&r" (pstate)
70 : "i" (PSTATE_IE));
71
72 if (!need_resched())
73 sun4v_cpu_yield();
74
75 /* Re-enable interrupts. */
76 __asm__ __volatile__(
77 "rdpr %%pstate, %0\n\t"
78 "or %0, %1, %0\n\t"
79 "wrpr %0, %%g0, %%pstate"
80 : "=&r" (pstate)
81 : "i" (PSTATE_IE));
81 } 82 }
82}
83 83
84#else 84 set_thread_flag(TIF_POLLING_NRFLAG);
85}
85 86
86/* 87/* The idle loop on sparc64. */
87 * the idle loop on a UltraMultiPenguin...
88 *
89 * TIF_POLLING_NRFLAG is set because we do not sleep the cpu
90 * inside of the idler task, so an interrupt is not needed
91 * to get a clean fast response.
92 *
93 * XXX Reverify this assumption... -DaveM
94 *
95 * Addendum: We do want it to do something for the signal
96 * delivery case, we detect that by just seeing
97 * if we are trying to send this to an idler or not.
98 */
99void cpu_idle(void) 88void cpu_idle(void)
100{ 89{
101 cpuinfo_sparc *cpuinfo = &local_cpu_data();
102 set_thread_flag(TIF_POLLING_NRFLAG); 90 set_thread_flag(TIF_POLLING_NRFLAG);
103 91
104 while(1) { 92 while(1) {
105 if (need_resched()) { 93 if (need_resched()) {
106 cpuinfo->idle_volume = 0;
107 preempt_enable_no_resched(); 94 preempt_enable_no_resched();
108 schedule(); 95 schedule();
109 preempt_disable(); 96 preempt_disable();
110 check_pgt_cache();
111 } 97 }
112 cpuinfo->idle_volume++; 98 sparc64_yield();
113
114 /* The store ordering is so that IRQ handlers on
115 * other cpus see our increasing idleness for the buddy
116 * redistribution algorithm. -DaveM
117 */
118 membar_storeload_storestore();
119 } 99 }
120} 100}
121 101
122#endif
123
124extern char reboot_command []; 102extern char reboot_command [];
125 103
126extern void (*prom_palette)(int); 104extern void (*prom_palette)(int);
@@ -354,6 +332,7 @@ void show_regs(struct pt_regs *regs)
354 extern long etrap, etraptl1; 332 extern long etrap, etraptl1;
355#endif 333#endif
356 __show_regs(regs); 334 __show_regs(regs);
335#if 0
357#ifdef CONFIG_SMP 336#ifdef CONFIG_SMP
358 { 337 {
359 extern void smp_report_regs(void); 338 extern void smp_report_regs(void);
@@ -361,6 +340,7 @@ void show_regs(struct pt_regs *regs)
361 smp_report_regs(); 340 smp_report_regs();
362 } 341 }
363#endif 342#endif
343#endif
364 344
365#ifdef VERBOSE_SHOWREGS 345#ifdef VERBOSE_SHOWREGS
366 if (regs->tpc >= &etrap && regs->tpc < &etraptl1 && 346 if (regs->tpc >= &etrap && regs->tpc < &etraptl1 &&
@@ -433,30 +413,15 @@ void exit_thread(void)
433void flush_thread(void) 413void flush_thread(void)
434{ 414{
435 struct thread_info *t = current_thread_info(); 415 struct thread_info *t = current_thread_info();
416 struct mm_struct *mm;
436 417
437 if (t->flags & _TIF_ABI_PENDING) 418 if (t->flags & _TIF_ABI_PENDING)
438 t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT); 419 t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT);
439 420
440 if (t->task->mm) { 421 mm = t->task->mm;
441 unsigned long pgd_cache = 0UL; 422 if (mm)
442 if (test_thread_flag(TIF_32BIT)) { 423 tsb_context_switch(mm);
443 struct mm_struct *mm = t->task->mm;
444 pgd_t *pgd0 = &mm->pgd[0];
445 pud_t *pud0 = pud_offset(pgd0, 0);
446 424
447 if (pud_none(*pud0)) {
448 pmd_t *page = pmd_alloc_one(mm, 0);
449 pud_set(pud0, page);
450 }
451 pgd_cache = get_pgd_cache(pgd0);
452 }
453 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
454 "membar #Sync"
455 : /* no outputs */
456 : "r" (pgd_cache),
457 "r" (TSB_REG),
458 "i" (ASI_DMMU));
459 }
460 set_thread_wsaved(0); 425 set_thread_wsaved(0);
461 426
462 /* Turn off performance counters if on. */ 427 /* Turn off performance counters if on. */
@@ -555,6 +520,18 @@ void synchronize_user_stack(void)
555 } 520 }
556} 521}
557 522
523static void stack_unaligned(unsigned long sp)
524{
525 siginfo_t info;
526
527 info.si_signo = SIGBUS;
528 info.si_errno = 0;
529 info.si_code = BUS_ADRALN;
530 info.si_addr = (void __user *) sp;
531 info.si_trapno = 0;
532 force_sig_info(SIGBUS, &info, current);
533}
534
558void fault_in_user_windows(void) 535void fault_in_user_windows(void)
559{ 536{
560 struct thread_info *t = current_thread_info(); 537 struct thread_info *t = current_thread_info();
@@ -570,13 +547,17 @@ void fault_in_user_windows(void)
570 flush_user_windows(); 547 flush_user_windows();
571 window = get_thread_wsaved(); 548 window = get_thread_wsaved();
572 549
573 if (window != 0) { 550 if (likely(window != 0)) {
574 window -= 1; 551 window -= 1;
575 do { 552 do {
576 unsigned long sp = (t->rwbuf_stkptrs[window] + bias); 553 unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
577 struct reg_window *rwin = &t->reg_window[window]; 554 struct reg_window *rwin = &t->reg_window[window];
578 555
579 if (copy_to_user((char __user *)sp, rwin, winsize)) 556 if (unlikely(sp & 0x7UL))
557 stack_unaligned(sp);
558
559 if (unlikely(copy_to_user((char __user *)sp,
560 rwin, winsize)))
580 goto barf; 561 goto barf;
581 } while (window--); 562 } while (window--);
582 } 563 }
diff --git a/arch/sparc64/kernel/ptrace.c b/arch/sparc64/kernel/ptrace.c
index 3f9746f856d2..eb93e9c52846 100644
--- a/arch/sparc64/kernel/ptrace.c
+++ b/arch/sparc64/kernel/ptrace.c
@@ -124,6 +124,9 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
124{ 124{
125 BUG_ON(len > PAGE_SIZE); 125 BUG_ON(len > PAGE_SIZE);
126 126
127 if (tlb_type == hypervisor)
128 return;
129
127#ifdef DCACHE_ALIASING_POSSIBLE 130#ifdef DCACHE_ALIASING_POSSIBLE
128 /* If bit 13 of the kernel address we used to access the 131 /* If bit 13 of the kernel address we used to access the
129 * user page is the same as the virtual address that page 132 * user page is the same as the virtual address that page
diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S
index b80eba0081ca..7130e866f935 100644
--- a/arch/sparc64/kernel/rtrap.S
+++ b/arch/sparc64/kernel/rtrap.S
@@ -223,12 +223,26 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
223 ldx [%sp + PTREGS_OFF + PT_V9_G3], %g3 223 ldx [%sp + PTREGS_OFF + PT_V9_G3], %g3
224 ldx [%sp + PTREGS_OFF + PT_V9_G4], %g4 224 ldx [%sp + PTREGS_OFF + PT_V9_G4], %g4
225 ldx [%sp + PTREGS_OFF + PT_V9_G5], %g5 225 ldx [%sp + PTREGS_OFF + PT_V9_G5], %g5
226 mov TSB_REG, %g6 226 brz,pt %l3, 1f
227 brnz,a,pn %l3, 1f 227 mov %g6, %l2
228 ldxa [%g6] ASI_IMMU, %g5 228
2291: ldx [%sp + PTREGS_OFF + PT_V9_G6], %g6 229 /* Must do this before thread reg is clobbered below. */
230 LOAD_PER_CPU_BASE(%g5, %g6, %i0, %i1, %i2)
2311:
232 ldx [%sp + PTREGS_OFF + PT_V9_G6], %g6
230 ldx [%sp + PTREGS_OFF + PT_V9_G7], %g7 233 ldx [%sp + PTREGS_OFF + PT_V9_G7], %g7
231 wrpr %g0, RTRAP_PSTATE_AG_IRQOFF, %pstate 234
235 /* Normal globals are restored, go to trap globals. */
236661: wrpr %g0, RTRAP_PSTATE_AG_IRQOFF, %pstate
237 nop
238 .section .sun4v_2insn_patch, "ax"
239 .word 661b
240 wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
241 SET_GL(1)
242 .previous
243
244 mov %l2, %g6
245
232 ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0 246 ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0
233 ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1 247 ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1
234 248
@@ -252,27 +266,108 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
252 266
253 brnz,pn %l3, kern_rtt 267 brnz,pn %l3, kern_rtt
254 mov PRIMARY_CONTEXT, %l7 268 mov PRIMARY_CONTEXT, %l7
255 ldxa [%l7 + %l7] ASI_DMMU, %l0 269
270661: ldxa [%l7 + %l7] ASI_DMMU, %l0
271 .section .sun4v_1insn_patch, "ax"
272 .word 661b
273 ldxa [%l7 + %l7] ASI_MMU, %l0
274 .previous
275
256 sethi %hi(sparc64_kern_pri_nuc_bits), %l1 276 sethi %hi(sparc64_kern_pri_nuc_bits), %l1
257 ldx [%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1 277 ldx [%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1
258 or %l0, %l1, %l0 278 or %l0, %l1, %l0
259 stxa %l0, [%l7] ASI_DMMU 279
260 flush %g6 280661: stxa %l0, [%l7] ASI_DMMU
281 .section .sun4v_1insn_patch, "ax"
282 .word 661b
283 stxa %l0, [%l7] ASI_MMU
284 .previous
285
286 sethi %hi(KERNBASE), %l7
287 flush %l7
261 rdpr %wstate, %l1 288 rdpr %wstate, %l1
262 rdpr %otherwin, %l2 289 rdpr %otherwin, %l2
263 srl %l1, 3, %l1 290 srl %l1, 3, %l1
264 291
265 wrpr %l2, %g0, %canrestore 292 wrpr %l2, %g0, %canrestore
266 wrpr %l1, %g0, %wstate 293 wrpr %l1, %g0, %wstate
267 wrpr %g0, %g0, %otherwin 294 brnz,pt %l2, user_rtt_restore
295 wrpr %g0, %g0, %otherwin
296
297 ldx [%g6 + TI_FLAGS], %g3
298 wr %g0, ASI_AIUP, %asi
299 rdpr %cwp, %g1
300 andcc %g3, _TIF_32BIT, %g0
301 sub %g1, 1, %g1
302 bne,pt %xcc, user_rtt_fill_32bit
303 wrpr %g1, %cwp
304 ba,a,pt %xcc, user_rtt_fill_64bit
305
306user_rtt_fill_fixup:
307 rdpr %cwp, %g1
308 add %g1, 1, %g1
309 wrpr %g1, 0x0, %cwp
310
311 rdpr %wstate, %g2
312 sll %g2, 3, %g2
313 wrpr %g2, 0x0, %wstate
314
315 /* We know %canrestore and %otherwin are both zero. */
316
317 sethi %hi(sparc64_kern_pri_context), %g2
318 ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2
319 mov PRIMARY_CONTEXT, %g1
320
321661: stxa %g2, [%g1] ASI_DMMU
322 .section .sun4v_1insn_patch, "ax"
323 .word 661b
324 stxa %g2, [%g1] ASI_MMU
325 .previous
326
327 sethi %hi(KERNBASE), %g1
328 flush %g1
329
330 or %g4, FAULT_CODE_WINFIXUP, %g4
331 stb %g4, [%g6 + TI_FAULT_CODE]
332 stx %g5, [%g6 + TI_FAULT_ADDR]
333
334 mov %g6, %l1
335 wrpr %g0, 0x0, %tl
336
337661: nop
338 .section .sun4v_1insn_patch, "ax"
339 .word 661b
340 SET_GL(0)
341 .previous
342
343 wrpr %g0, RTRAP_PSTATE, %pstate
344
345 mov %l1, %g6
346 ldx [%g6 + TI_TASK], %g4
347 LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
348 call do_sparc64_fault
349 add %sp, PTREGS_OFF, %o0
350 ba,pt %xcc, rtrap
351 nop
352
353user_rtt_pre_restore:
354 add %g1, 1, %g1
355 wrpr %g1, 0x0, %cwp
356
357user_rtt_restore:
268 restore 358 restore
269 rdpr %canrestore, %g1 359 rdpr %canrestore, %g1
270 wrpr %g1, 0x0, %cleanwin 360 wrpr %g1, 0x0, %cleanwin
271 retry 361 retry
272 nop 362 nop
273 363
274kern_rtt: restore 364kern_rtt: rdpr %canrestore, %g1
365 brz,pn %g1, kern_rtt_fill
366 nop
367kern_rtt_restore:
368 restore
275 retry 369 retry
370
276to_kernel: 371to_kernel:
277#ifdef CONFIG_PREEMPT 372#ifdef CONFIG_PREEMPT
278 ldsw [%g6 + TI_PRE_COUNT], %l5 373 ldsw [%g6 + TI_PRE_COUNT], %l5
diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c
index d95a1bcf163d..1d6ffdeabd4c 100644
--- a/arch/sparc64/kernel/sbus.c
+++ b/arch/sparc64/kernel/sbus.c
@@ -693,11 +693,11 @@ void sbus_set_sbus64(struct sbus_dev *sdev, int bursts)
693 693
694/* SBUS SYSIO INO number to Sparc PIL level. */ 694/* SBUS SYSIO INO number to Sparc PIL level. */
695static unsigned char sysio_ino_to_pil[] = { 695static unsigned char sysio_ino_to_pil[] = {
696 0, 4, 4, 7, 5, 7, 8, 9, /* SBUS slot 0 */ 696 0, 5, 5, 7, 5, 7, 8, 9, /* SBUS slot 0 */
697 0, 4, 4, 7, 5, 7, 8, 9, /* SBUS slot 1 */ 697 0, 5, 5, 7, 5, 7, 8, 9, /* SBUS slot 1 */
698 0, 4, 4, 7, 5, 7, 8, 9, /* SBUS slot 2 */ 698 0, 5, 5, 7, 5, 7, 8, 9, /* SBUS slot 2 */
699 0, 4, 4, 7, 5, 7, 8, 9, /* SBUS slot 3 */ 699 0, 5, 5, 7, 5, 7, 8, 9, /* SBUS slot 3 */
700 4, /* Onboard SCSI */ 700 5, /* Onboard SCSI */
701 5, /* Onboard Ethernet */ 701 5, /* Onboard Ethernet */
702/*XXX*/ 8, /* Onboard BPP */ 702/*XXX*/ 8, /* Onboard BPP */
703 0, /* Bogon */ 703 0, /* Bogon */
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index 158bd31e15b7..7d0e67c1ce50 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -64,12 +64,6 @@ struct screen_info screen_info = {
64 16 /* orig-video-points */ 64 16 /* orig-video-points */
65}; 65};
66 66
67/* Typing sync at the prom prompt calls the function pointed to by
68 * the sync callback which I set to the following function.
69 * This should sync all filesystems and return, for now it just
70 * prints out pretty messages and returns.
71 */
72
73void (*prom_palette)(int); 67void (*prom_palette)(int);
74void (*prom_keyboard)(void); 68void (*prom_keyboard)(void);
75 69
@@ -79,259 +73,6 @@ prom_console_write(struct console *con, const char *s, unsigned n)
79 prom_write(s, n); 73 prom_write(s, n);
80} 74}
81 75
82static struct console prom_console = {
83 .name = "prom",
84 .write = prom_console_write,
85 .flags = CON_CONSDEV | CON_ENABLED,
86 .index = -1,
87};
88
89#define PROM_TRUE -1
90#define PROM_FALSE 0
91
92/* Pretty sick eh? */
93int prom_callback(long *args)
94{
95 struct console *cons, *saved_console = NULL;
96 unsigned long flags;
97 char *cmd;
98 extern spinlock_t prom_entry_lock;
99
100 if (!args)
101 return -1;
102 if (!(cmd = (char *)args[0]))
103 return -1;
104
105 /*
106 * The callback can be invoked on the cpu that first dropped
107 * into prom_cmdline after taking the serial interrupt, or on
108 * a slave processor that was smp_captured() if the
109 * administrator has done a switch-cpu inside obp. In either
110 * case, the cpu is marked as in-interrupt. Drop IRQ locks.
111 */
112 irq_exit();
113
114 /* XXX Revisit the locking here someday. This is a debugging
115 * XXX feature so it isnt all that critical. -DaveM
116 */
117 local_irq_save(flags);
118
119 spin_unlock(&prom_entry_lock);
120 cons = console_drivers;
121 while (cons) {
122 unregister_console(cons);
123 cons->flags &= ~(CON_PRINTBUFFER);
124 cons->next = saved_console;
125 saved_console = cons;
126 cons = console_drivers;
127 }
128 register_console(&prom_console);
129 if (!strcmp(cmd, "sync")) {
130 prom_printf("PROM `%s' command...\n", cmd);
131 show_free_areas();
132 if (current->pid != 0) {
133 local_irq_enable();
134 sys_sync();
135 local_irq_disable();
136 }
137 args[2] = 0;
138 args[args[1] + 3] = -1;
139 prom_printf("Returning to PROM\n");
140 } else if (!strcmp(cmd, "va>tte-data")) {
141 unsigned long ctx, va;
142 unsigned long tte = 0;
143 long res = PROM_FALSE;
144
145 ctx = args[3];
146 va = args[4];
147 if (ctx) {
148 /*
149 * Find process owning ctx, lookup mapping.
150 */
151 struct task_struct *p;
152 struct mm_struct *mm = NULL;
153 pgd_t *pgdp;
154 pud_t *pudp;
155 pmd_t *pmdp;
156 pte_t *ptep;
157 pte_t pte;
158
159 for_each_process(p) {
160 mm = p->mm;
161 if (CTX_NRBITS(mm->context) == ctx)
162 break;
163 }
164 if (!mm ||
165 CTX_NRBITS(mm->context) != ctx)
166 goto done;
167
168 pgdp = pgd_offset(mm, va);
169 if (pgd_none(*pgdp))
170 goto done;
171 pudp = pud_offset(pgdp, va);
172 if (pud_none(*pudp))
173 goto done;
174 pmdp = pmd_offset(pudp, va);
175 if (pmd_none(*pmdp))
176 goto done;
177
178 /* Preemption implicitly disabled by virtue of
179 * being called from inside OBP.
180 */
181 ptep = pte_offset_map(pmdp, va);
182 pte = *ptep;
183 if (pte_present(pte)) {
184 tte = pte_val(pte);
185 res = PROM_TRUE;
186 }
187 pte_unmap(ptep);
188 goto done;
189 }
190
191 if ((va >= KERNBASE) && (va < (KERNBASE + (4 * 1024 * 1024)))) {
192 extern unsigned long sparc64_kern_pri_context;
193
194 /* Spitfire Errata #32 workaround */
195 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
196 "flush %%g6"
197 : /* No outputs */
198 : "r" (sparc64_kern_pri_context),
199 "r" (PRIMARY_CONTEXT),
200 "i" (ASI_DMMU));
201
202 /*
203 * Locked down tlb entry.
204 */
205
206 if (tlb_type == spitfire)
207 tte = spitfire_get_dtlb_data(SPITFIRE_HIGHEST_LOCKED_TLBENT);
208 else if (tlb_type == cheetah || tlb_type == cheetah_plus)
209 tte = cheetah_get_ldtlb_data(CHEETAH_HIGHEST_LOCKED_TLBENT);
210
211 res = PROM_TRUE;
212 goto done;
213 }
214
215 if (va < PGDIR_SIZE) {
216 /*
217 * vmalloc or prom_inherited mapping.
218 */
219 pgd_t *pgdp;
220 pud_t *pudp;
221 pmd_t *pmdp;
222 pte_t *ptep;
223 pte_t pte;
224 int error;
225
226 if ((va >= LOW_OBP_ADDRESS) && (va < HI_OBP_ADDRESS)) {
227 tte = prom_virt_to_phys(va, &error);
228 if (!error)
229 res = PROM_TRUE;
230 goto done;
231 }
232 pgdp = pgd_offset_k(va);
233 if (pgd_none(*pgdp))
234 goto done;
235 pudp = pud_offset(pgdp, va);
236 if (pud_none(*pudp))
237 goto done;
238 pmdp = pmd_offset(pudp, va);
239 if (pmd_none(*pmdp))
240 goto done;
241
242 /* Preemption implicitly disabled by virtue of
243 * being called from inside OBP.
244 */
245 ptep = pte_offset_kernel(pmdp, va);
246 pte = *ptep;
247 if (pte_present(pte)) {
248 tte = pte_val(pte);
249 res = PROM_TRUE;
250 }
251 goto done;
252 }
253
254 if (va < PAGE_OFFSET) {
255 /*
256 * No mappings here.
257 */
258 goto done;
259 }
260
261 if (va & (1UL << 40)) {
262 /*
263 * I/O page.
264 */
265
266 tte = (__pa(va) & _PAGE_PADDR) |
267 _PAGE_VALID | _PAGE_SZ4MB |
268 _PAGE_E | _PAGE_P | _PAGE_W;
269 res = PROM_TRUE;
270 goto done;
271 }
272
273 /*
274 * Normal page.
275 */
276 tte = (__pa(va) & _PAGE_PADDR) |
277 _PAGE_VALID | _PAGE_SZ4MB |
278 _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W;
279 res = PROM_TRUE;
280
281 done:
282 if (res == PROM_TRUE) {
283 args[2] = 3;
284 args[args[1] + 3] = 0;
285 args[args[1] + 4] = res;
286 args[args[1] + 5] = tte;
287 } else {
288 args[2] = 2;
289 args[args[1] + 3] = 0;
290 args[args[1] + 4] = res;
291 }
292 } else if (!strcmp(cmd, ".soft1")) {
293 unsigned long tte;
294
295 tte = args[3];
296 prom_printf("%lx:\"%s%s%s%s%s\" ",
297 (tte & _PAGE_SOFT) >> 7,
298 tte & _PAGE_MODIFIED ? "M" : "-",
299 tte & _PAGE_ACCESSED ? "A" : "-",
300 tte & _PAGE_READ ? "W" : "-",
301 tte & _PAGE_WRITE ? "R" : "-",
302 tte & _PAGE_PRESENT ? "P" : "-");
303
304 args[2] = 2;
305 args[args[1] + 3] = 0;
306 args[args[1] + 4] = PROM_TRUE;
307 } else if (!strcmp(cmd, ".soft2")) {
308 unsigned long tte;
309
310 tte = args[3];
311 prom_printf("%lx ", (tte & 0x07FC000000000000UL) >> 50);
312
313 args[2] = 2;
314 args[args[1] + 3] = 0;
315 args[args[1] + 4] = PROM_TRUE;
316 } else {
317 prom_printf("unknown PROM `%s' command...\n", cmd);
318 }
319 unregister_console(&prom_console);
320 while (saved_console) {
321 cons = saved_console;
322 saved_console = cons->next;
323 register_console(cons);
324 }
325 spin_lock(&prom_entry_lock);
326 local_irq_restore(flags);
327
328 /*
329 * Restore in-interrupt status for a resume from obp.
330 */
331 irq_enter();
332 return 0;
333}
334
335unsigned int boot_flags = 0; 76unsigned int boot_flags = 0;
336#define BOOTME_DEBUG 0x1 77#define BOOTME_DEBUG 0x1
337#define BOOTME_SINGLE 0x2 78#define BOOTME_SINGLE 0x2
@@ -479,15 +220,99 @@ char reboot_command[COMMAND_LINE_SIZE];
479 220
480static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 }; 221static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
481 222
482void register_prom_callbacks(void) 223static void __init per_cpu_patch(void)
483{ 224{
484 prom_setcallback(prom_callback); 225 struct cpuid_patch_entry *p;
485 prom_feval(": linux-va>tte-data 2 \" va>tte-data\" $callback drop ; " 226 unsigned long ver;
486 "' linux-va>tte-data to va>tte-data"); 227 int is_jbus;
487 prom_feval(": linux-.soft1 1 \" .soft1\" $callback 2drop ; " 228
488 "' linux-.soft1 to .soft1"); 229 if (tlb_type == spitfire && !this_is_starfire)
489 prom_feval(": linux-.soft2 1 \" .soft2\" $callback 2drop ; " 230 return;
490 "' linux-.soft2 to .soft2"); 231
232 is_jbus = 0;
233 if (tlb_type != hypervisor) {
234 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
235 is_jbus = ((ver >> 32UL) == __JALAPENO_ID ||
236 (ver >> 32UL) == __SERRANO_ID);
237 }
238
239 p = &__cpuid_patch;
240 while (p < &__cpuid_patch_end) {
241 unsigned long addr = p->addr;
242 unsigned int *insns;
243
244 switch (tlb_type) {
245 case spitfire:
246 insns = &p->starfire[0];
247 break;
248 case cheetah:
249 case cheetah_plus:
250 if (is_jbus)
251 insns = &p->cheetah_jbus[0];
252 else
253 insns = &p->cheetah_safari[0];
254 break;
255 case hypervisor:
256 insns = &p->sun4v[0];
257 break;
258 default:
259 prom_printf("Unknown cpu type, halting.\n");
260 prom_halt();
261 };
262
263 *(unsigned int *) (addr + 0) = insns[0];
264 wmb();
265 __asm__ __volatile__("flush %0" : : "r" (addr + 0));
266
267 *(unsigned int *) (addr + 4) = insns[1];
268 wmb();
269 __asm__ __volatile__("flush %0" : : "r" (addr + 4));
270
271 *(unsigned int *) (addr + 8) = insns[2];
272 wmb();
273 __asm__ __volatile__("flush %0" : : "r" (addr + 8));
274
275 *(unsigned int *) (addr + 12) = insns[3];
276 wmb();
277 __asm__ __volatile__("flush %0" : : "r" (addr + 12));
278
279 p++;
280 }
281}
282
283static void __init sun4v_patch(void)
284{
285 struct sun4v_1insn_patch_entry *p1;
286 struct sun4v_2insn_patch_entry *p2;
287
288 if (tlb_type != hypervisor)
289 return;
290
291 p1 = &__sun4v_1insn_patch;
292 while (p1 < &__sun4v_1insn_patch_end) {
293 unsigned long addr = p1->addr;
294
295 *(unsigned int *) (addr + 0) = p1->insn;
296 wmb();
297 __asm__ __volatile__("flush %0" : : "r" (addr + 0));
298
299 p1++;
300 }
301
302 p2 = &__sun4v_2insn_patch;
303 while (p2 < &__sun4v_2insn_patch_end) {
304 unsigned long addr = p2->addr;
305
306 *(unsigned int *) (addr + 0) = p2->insns[0];
307 wmb();
308 __asm__ __volatile__("flush %0" : : "r" (addr + 0));
309
310 *(unsigned int *) (addr + 4) = p2->insns[1];
311 wmb();
312 __asm__ __volatile__("flush %0" : : "r" (addr + 4));
313
314 p2++;
315 }
491} 316}
492 317
493void __init setup_arch(char **cmdline_p) 318void __init setup_arch(char **cmdline_p)
@@ -496,7 +321,10 @@ void __init setup_arch(char **cmdline_p)
496 *cmdline_p = prom_getbootargs(); 321 *cmdline_p = prom_getbootargs();
497 strcpy(saved_command_line, *cmdline_p); 322 strcpy(saved_command_line, *cmdline_p);
498 323
499 printk("ARCH: SUN4U\n"); 324 if (tlb_type == hypervisor)
325 printk("ARCH: SUN4V\n");
326 else
327 printk("ARCH: SUN4U\n");
500 328
501#ifdef CONFIG_DUMMY_CONSOLE 329#ifdef CONFIG_DUMMY_CONSOLE
502 conswitchp = &dummy_con; 330 conswitchp = &dummy_con;
@@ -507,6 +335,13 @@ void __init setup_arch(char **cmdline_p)
507 /* Work out if we are starfire early on */ 335 /* Work out if we are starfire early on */
508 check_if_starfire(); 336 check_if_starfire();
509 337
338 /* Now we know enough to patch the get_cpuid sequences
339 * used by trap code.
340 */
341 per_cpu_patch();
342
343 sun4v_patch();
344
510 boot_flags_init(*cmdline_p); 345 boot_flags_init(*cmdline_p);
511 346
512 idprom_init(); 347 idprom_init();
@@ -514,7 +349,7 @@ void __init setup_arch(char **cmdline_p)
514 if (!root_flags) 349 if (!root_flags)
515 root_mountflags &= ~MS_RDONLY; 350 root_mountflags &= ~MS_RDONLY;
516 ROOT_DEV = old_decode_dev(root_dev); 351 ROOT_DEV = old_decode_dev(root_dev);
517#ifdef CONFIG_BLK_DEV_INITRD 352#ifdef CONFIG_BLK_DEV_RAM
518 rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK; 353 rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
519 rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0); 354 rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0);
520 rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0); 355 rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
@@ -544,6 +379,9 @@ void __init setup_arch(char **cmdline_p)
544 379
545 smp_setup_cpu_possible_map(); 380 smp_setup_cpu_possible_map();
546 381
382 /* Get boot processor trap_block[] setup. */
383 init_cur_cpu_trap(current_thread_info());
384
547 paging_init(); 385 paging_init();
548} 386}
549 387
@@ -565,6 +403,12 @@ static int __init set_preferred_console(void)
565 serial_console = 2; 403 serial_console = 2;
566 } else if (idev == PROMDEV_IRSC && odev == PROMDEV_ORSC) { 404 } else if (idev == PROMDEV_IRSC && odev == PROMDEV_ORSC) {
567 serial_console = 3; 405 serial_console = 3;
406 } else if (idev == PROMDEV_IVCONS && odev == PROMDEV_OVCONS) {
407 /* sunhv_console_init() doesn't check the serial_console
408 * value anyways...
409 */
410 serial_console = 4;
411 return add_preferred_console("ttyHV", 0, NULL);
568 } else { 412 } else {
569 prom_printf("Inconsistent console: " 413 prom_printf("Inconsistent console: "
570 "input %d, output %d\n", 414 "input %d, output %d\n",
@@ -598,9 +442,8 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
598 seq_printf(m, 442 seq_printf(m,
599 "cpu\t\t: %s\n" 443 "cpu\t\t: %s\n"
600 "fpu\t\t: %s\n" 444 "fpu\t\t: %s\n"
601 "promlib\t\t: Version 3 Revision %d\n" 445 "prom\t\t: %s\n"
602 "prom\t\t: %d.%d.%d\n" 446 "type\t\t: %s\n"
603 "type\t\t: sun4u\n"
604 "ncpus probed\t: %d\n" 447 "ncpus probed\t: %d\n"
605 "ncpus active\t: %d\n" 448 "ncpus active\t: %d\n"
606 "D$ parity tl1\t: %u\n" 449 "D$ parity tl1\t: %u\n"
@@ -612,10 +455,10 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
612 , 455 ,
613 sparc_cpu_type, 456 sparc_cpu_type,
614 sparc_fpu_type, 457 sparc_fpu_type,
615 prom_rev, 458 prom_version,
616 prom_prev >> 16, 459 ((tlb_type == hypervisor) ?
617 (prom_prev >> 8) & 0xff, 460 "sun4v" :
618 prom_prev & 0xff, 461 "sun4u"),
619 ncpus_probed, 462 ncpus_probed,
620 num_online_cpus(), 463 num_online_cpus(),
621 dcache_parity_tl1_occurred, 464 dcache_parity_tl1_occurred,
@@ -692,15 +535,11 @@ static int __init topology_init(void)
692 while (!cpu_find_by_instance(ncpus_probed, NULL, NULL)) 535 while (!cpu_find_by_instance(ncpus_probed, NULL, NULL))
693 ncpus_probed++; 536 ncpus_probed++;
694 537
695 for (i = 0; i < NR_CPUS; i++) { 538 for_each_cpu(i) {
696 if (cpu_possible(i)) { 539 struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
697 struct cpu *p = kmalloc(sizeof(*p), GFP_KERNEL); 540 if (p) {
698 541 register_cpu(p, i, NULL);
699 if (p) { 542 err = 0;
700 memset(p, 0, sizeof(*p));
701 register_cpu(p, i, NULL);
702 err = 0;
703 }
704 } 543 }
705 } 544 }
706 545
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 1f7ad8a69052..373a701c90a5 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -38,6 +38,7 @@
38#include <asm/timer.h> 38#include <asm/timer.h>
39#include <asm/starfire.h> 39#include <asm/starfire.h>
40#include <asm/tlb.h> 40#include <asm/tlb.h>
41#include <asm/sections.h>
41 42
42extern void calibrate_delay(void); 43extern void calibrate_delay(void);
43 44
@@ -46,6 +47,8 @@ static unsigned char boot_cpu_id;
46 47
47cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE; 48cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
48cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE; 49cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
50cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly =
51 { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
49static cpumask_t smp_commenced_mask; 52static cpumask_t smp_commenced_mask;
50static cpumask_t cpu_callout_map; 53static cpumask_t cpu_callout_map;
51 54
@@ -77,7 +80,7 @@ void smp_bogo(struct seq_file *m)
77 80
78void __init smp_store_cpu_info(int id) 81void __init smp_store_cpu_info(int id)
79{ 82{
80 int cpu_node; 83 int cpu_node, def;
81 84
82 /* multiplier and counter set by 85 /* multiplier and counter set by
83 smp_setup_percpu_timer() */ 86 smp_setup_percpu_timer() */
@@ -87,24 +90,32 @@ void __init smp_store_cpu_info(int id)
87 cpu_data(id).clock_tick = prom_getintdefault(cpu_node, 90 cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
88 "clock-frequency", 0); 91 "clock-frequency", 0);
89 92
90 cpu_data(id).pgcache_size = 0; 93 def = ((tlb_type == hypervisor) ? (8 * 1024) : (16 * 1024));
91 cpu_data(id).pte_cache[0] = NULL;
92 cpu_data(id).pte_cache[1] = NULL;
93 cpu_data(id).pgd_cache = NULL;
94 cpu_data(id).idle_volume = 1;
95
96 cpu_data(id).dcache_size = prom_getintdefault(cpu_node, "dcache-size", 94 cpu_data(id).dcache_size = prom_getintdefault(cpu_node, "dcache-size",
97 16 * 1024); 95 def);
96
97 def = 32;
98 cpu_data(id).dcache_line_size = 98 cpu_data(id).dcache_line_size =
99 prom_getintdefault(cpu_node, "dcache-line-size", 32); 99 prom_getintdefault(cpu_node, "dcache-line-size", def);
100
101 def = 16 * 1024;
100 cpu_data(id).icache_size = prom_getintdefault(cpu_node, "icache-size", 102 cpu_data(id).icache_size = prom_getintdefault(cpu_node, "icache-size",
101 16 * 1024); 103 def);
104
105 def = 32;
102 cpu_data(id).icache_line_size = 106 cpu_data(id).icache_line_size =
103 prom_getintdefault(cpu_node, "icache-line-size", 32); 107 prom_getintdefault(cpu_node, "icache-line-size", def);
108
109 def = ((tlb_type == hypervisor) ?
110 (3 * 1024 * 1024) :
111 (4 * 1024 * 1024));
104 cpu_data(id).ecache_size = prom_getintdefault(cpu_node, "ecache-size", 112 cpu_data(id).ecache_size = prom_getintdefault(cpu_node, "ecache-size",
105 4 * 1024 * 1024); 113 def);
114
115 def = 64;
106 cpu_data(id).ecache_line_size = 116 cpu_data(id).ecache_line_size =
107 prom_getintdefault(cpu_node, "ecache-line-size", 64); 117 prom_getintdefault(cpu_node, "ecache-line-size", def);
118
108 printk("CPU[%d]: Caches " 119 printk("CPU[%d]: Caches "
109 "D[sz(%d):line_sz(%d)] " 120 "D[sz(%d):line_sz(%d)] "
110 "I[sz(%d):line_sz(%d)] " 121 "I[sz(%d):line_sz(%d)] "
@@ -119,27 +130,16 @@ static void smp_setup_percpu_timer(void);
119 130
120static volatile unsigned long callin_flag = 0; 131static volatile unsigned long callin_flag = 0;
121 132
122extern void inherit_locked_prom_mappings(int save_p);
123
124static inline void cpu_setup_percpu_base(unsigned long cpu_id)
125{
126 __asm__ __volatile__("mov %0, %%g5\n\t"
127 "stxa %0, [%1] %2\n\t"
128 "membar #Sync"
129 : /* no outputs */
130 : "r" (__per_cpu_offset(cpu_id)),
131 "r" (TSB_REG), "i" (ASI_IMMU));
132}
133
134void __init smp_callin(void) 133void __init smp_callin(void)
135{ 134{
136 int cpuid = hard_smp_processor_id(); 135 int cpuid = hard_smp_processor_id();
137 136
138 inherit_locked_prom_mappings(0); 137 __local_per_cpu_offset = __per_cpu_offset(cpuid);
139 138
140 __flush_tlb_all(); 139 if (tlb_type == hypervisor)
140 sun4v_ktsb_register();
141 141
142 cpu_setup_percpu_base(cpuid); 142 __flush_tlb_all();
143 143
144 smp_setup_percpu_timer(); 144 smp_setup_percpu_timer();
145 145
@@ -316,6 +316,8 @@ static void smp_synchronize_one_tick(int cpu)
316 spin_unlock_irqrestore(&itc_sync_lock, flags); 316 spin_unlock_irqrestore(&itc_sync_lock, flags);
317} 317}
318 318
319extern void sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load);
320
319extern unsigned long sparc64_cpu_startup; 321extern unsigned long sparc64_cpu_startup;
320 322
321/* The OBP cpu startup callback truncates the 3rd arg cookie to 323/* The OBP cpu startup callback truncates the 3rd arg cookie to
@@ -331,21 +333,31 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu)
331 unsigned long cookie = 333 unsigned long cookie =
332 (unsigned long)(&cpu_new_thread); 334 (unsigned long)(&cpu_new_thread);
333 struct task_struct *p; 335 struct task_struct *p;
334 int timeout, ret, cpu_node; 336 int timeout, ret;
335 337
336 p = fork_idle(cpu); 338 p = fork_idle(cpu);
337 callin_flag = 0; 339 callin_flag = 0;
338 cpu_new_thread = task_thread_info(p); 340 cpu_new_thread = task_thread_info(p);
339 cpu_set(cpu, cpu_callout_map); 341 cpu_set(cpu, cpu_callout_map);
340 342
341 cpu_find_by_mid(cpu, &cpu_node); 343 if (tlb_type == hypervisor) {
342 prom_startcpu(cpu_node, entry, cookie); 344 /* Alloc the mondo queues, cpu will load them. */
345 sun4v_init_mondo_queues(0, cpu, 1, 0);
346
347 prom_startcpu_cpuid(cpu, entry, cookie);
348 } else {
349 int cpu_node;
350
351 cpu_find_by_mid(cpu, &cpu_node);
352 prom_startcpu(cpu_node, entry, cookie);
353 }
343 354
344 for (timeout = 0; timeout < 5000000; timeout++) { 355 for (timeout = 0; timeout < 5000000; timeout++) {
345 if (callin_flag) 356 if (callin_flag)
346 break; 357 break;
347 udelay(100); 358 udelay(100);
348 } 359 }
360
349 if (callin_flag) { 361 if (callin_flag) {
350 ret = 0; 362 ret = 0;
351 } else { 363 } else {
@@ -441,7 +453,7 @@ static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, c
441static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask) 453static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
442{ 454{
443 u64 pstate, ver; 455 u64 pstate, ver;
444 int nack_busy_id, is_jalapeno; 456 int nack_busy_id, is_jbus;
445 457
446 if (cpus_empty(mask)) 458 if (cpus_empty(mask))
447 return; 459 return;
@@ -451,7 +463,8 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mas
451 * derivative processor. 463 * derivative processor.
452 */ 464 */
453 __asm__ ("rdpr %%ver, %0" : "=r" (ver)); 465 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
454 is_jalapeno = ((ver >> 32) == 0x003e0016); 466 is_jbus = ((ver >> 32) == __JALAPENO_ID ||
467 (ver >> 32) == __SERRANO_ID);
455 468
456 __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate)); 469 __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
457 470
@@ -476,7 +489,7 @@ retry:
476 for_each_cpu_mask(i, mask) { 489 for_each_cpu_mask(i, mask) {
477 u64 target = (i << 14) | 0x70; 490 u64 target = (i << 14) | 0x70;
478 491
479 if (!is_jalapeno) 492 if (!is_jbus)
480 target |= (nack_busy_id << 24); 493 target |= (nack_busy_id << 24);
481 __asm__ __volatile__( 494 __asm__ __volatile__(
482 "stxa %%g0, [%0] %1\n\t" 495 "stxa %%g0, [%0] %1\n\t"
@@ -529,7 +542,7 @@ retry:
529 for_each_cpu_mask(i, mask) { 542 for_each_cpu_mask(i, mask) {
530 u64 check_mask; 543 u64 check_mask;
531 544
532 if (is_jalapeno) 545 if (is_jbus)
533 check_mask = (0x2UL << (2*i)); 546 check_mask = (0x2UL << (2*i));
534 else 547 else
535 check_mask = (0x2UL << 548 check_mask = (0x2UL <<
@@ -544,6 +557,155 @@ retry:
544 } 557 }
545} 558}
546 559
560/* Multi-cpu list version. */
561static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
562{
563 struct trap_per_cpu *tb;
564 u16 *cpu_list;
565 u64 *mondo;
566 cpumask_t error_mask;
567 unsigned long flags, status;
568 int cnt, retries, this_cpu, prev_sent, i;
569
570 /* We have to do this whole thing with interrupts fully disabled.
571 * Otherwise if we send an xcall from interrupt context it will
572 * corrupt both our mondo block and cpu list state.
573 *
574 * One consequence of this is that we cannot use timeout mechanisms
575 * that depend upon interrupts being delivered locally. So, for
576 * example, we cannot sample jiffies and expect it to advance.
577 *
578 * Fortunately, udelay() uses %stick/%tick so we can use that.
579 */
580 local_irq_save(flags);
581
582 this_cpu = smp_processor_id();
583 tb = &trap_block[this_cpu];
584
585 mondo = __va(tb->cpu_mondo_block_pa);
586 mondo[0] = data0;
587 mondo[1] = data1;
588 mondo[2] = data2;
589 wmb();
590
591 cpu_list = __va(tb->cpu_list_pa);
592
593 /* Setup the initial cpu list. */
594 cnt = 0;
595 for_each_cpu_mask(i, mask)
596 cpu_list[cnt++] = i;
597
598 cpus_clear(error_mask);
599 retries = 0;
600 prev_sent = 0;
601 do {
602 int forward_progress, n_sent;
603
604 status = sun4v_cpu_mondo_send(cnt,
605 tb->cpu_list_pa,
606 tb->cpu_mondo_block_pa);
607
608 /* HV_EOK means all cpus received the xcall, we're done. */
609 if (likely(status == HV_EOK))
610 break;
611
612 /* First, see if we made any forward progress.
613 *
614 * The hypervisor indicates successful sends by setting
615 * cpu list entries to the value 0xffff.
616 */
617 n_sent = 0;
618 for (i = 0; i < cnt; i++) {
619 if (likely(cpu_list[i] == 0xffff))
620 n_sent++;
621 }
622
623 forward_progress = 0;
624 if (n_sent > prev_sent)
625 forward_progress = 1;
626
627 prev_sent = n_sent;
628
629 /* If we get a HV_ECPUERROR, then one or more of the cpus
630 * in the list are in error state. Use the cpu_state()
631 * hypervisor call to find out which cpus are in error state.
632 */
633 if (unlikely(status == HV_ECPUERROR)) {
634 for (i = 0; i < cnt; i++) {
635 long err;
636 u16 cpu;
637
638 cpu = cpu_list[i];
639 if (cpu == 0xffff)
640 continue;
641
642 err = sun4v_cpu_state(cpu);
643 if (err >= 0 &&
644 err == HV_CPU_STATE_ERROR) {
645 cpu_list[i] = 0xffff;
646 cpu_set(cpu, error_mask);
647 }
648 }
649 } else if (unlikely(status != HV_EWOULDBLOCK))
650 goto fatal_mondo_error;
651
652 /* Don't bother rewriting the CPU list, just leave the
653 * 0xffff and non-0xffff entries in there and the
654 * hypervisor will do the right thing.
655 *
656 * Only advance timeout state if we didn't make any
657 * forward progress.
658 */
659 if (unlikely(!forward_progress)) {
660 if (unlikely(++retries > 10000))
661 goto fatal_mondo_timeout;
662
663 /* Delay a little bit to let other cpus catch up
664 * on their cpu mondo queue work.
665 */
666 udelay(2 * cnt);
667 }
668 } while (1);
669
670 local_irq_restore(flags);
671
672 if (unlikely(!cpus_empty(error_mask)))
673 goto fatal_mondo_cpu_error;
674
675 return;
676
677fatal_mondo_cpu_error:
678 printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
679 "were in error state\n",
680 this_cpu);
681 printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu);
682 for_each_cpu_mask(i, error_mask)
683 printk("%d ", i);
684 printk("]\n");
685 return;
686
687fatal_mondo_timeout:
688 local_irq_restore(flags);
689 printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
 690 "progress after %d retries.\n",
691 this_cpu, retries);
692 goto dump_cpu_list_and_out;
693
694fatal_mondo_error:
695 local_irq_restore(flags);
696 printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
697 this_cpu, status);
698 printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
699 "mondo_block_pa(%lx)\n",
700 this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
701
702dump_cpu_list_and_out:
703 printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
704 for (i = 0; i < cnt; i++)
705 printk("%u ", cpu_list[i]);
706 printk("]\n");
707}
708
547/* Send cross call to all processors mentioned in MASK 709/* Send cross call to all processors mentioned in MASK
548 * except self. 710 * except self.
549 */ 711 */
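Note on the sun4v delivery loop added above: it never rewrites the CPU list between retries, because the hypervisor marks each successfully targeted entry with the sentinel value 0xffff, so counting sentinels per attempt is enough to tell real forward progress from a stall. A minimal, self-contained sketch of that bookkeeping follows; try_send() is a hypothetical stub standing in for sun4v_cpu_mondo_send() and the HV_* status handling, only the progress test mirrors the patch.

	#include <stdio.h>

	#define SENT 0xffff	/* sentinel written for delivered entries */

	/* Hypothetical stub: pretend one pending entry is delivered per call,
	 * return 0 once the whole list has been sent (the HV_EOK case).
	 */
	static int try_send(unsigned short *list, int cnt)
	{
		for (int i = 0; i < cnt; i++) {
			if (list[i] != SENT) {
				list[i] = SENT;
				return 1;	/* partial delivery, like HV_EWOULDBLOCK */
			}
		}
		return 0;
	}

	int main(void)
	{
		unsigned short cpu_list[] = { 1, 2, 3 };
		int cnt = 3, prev_sent = 0, retries = 0;

		for (;;) {
			int i, n_sent = 0;

			if (!try_send(cpu_list, cnt))
				break;			/* everything delivered */

			/* Count sentinel entries to measure forward progress. */
			for (i = 0; i < cnt; i++)
				if (cpu_list[i] == SENT)
					n_sent++;

			if (n_sent <= prev_sent && ++retries > 10000)
				break;			/* no forward progress: give up */
			prev_sent = n_sent;
		}
		printf("delivered with %d stalled retries\n", retries);
		return 0;
	}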
@@ -557,8 +719,10 @@ static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 d
557 719
558 if (tlb_type == spitfire) 720 if (tlb_type == spitfire)
559 spitfire_xcall_deliver(data0, data1, data2, mask); 721 spitfire_xcall_deliver(data0, data1, data2, mask);
560 else 722 else if (tlb_type == cheetah || tlb_type == cheetah_plus)
561 cheetah_xcall_deliver(data0, data1, data2, mask); 723 cheetah_xcall_deliver(data0, data1, data2, mask);
724 else
725 hypervisor_xcall_deliver(data0, data1, data2, mask);
562 /* NOTE: Caller runs local copy on master. */ 726 /* NOTE: Caller runs local copy on master. */
563 727
564 put_cpu(); 728 put_cpu();
@@ -594,16 +758,13 @@ extern unsigned long xcall_call_function;
594 * You must not call this function with disabled interrupts or from a 758 * You must not call this function with disabled interrupts or from a
595 * hardware interrupt handler or from a bottom half handler. 759 * hardware interrupt handler or from a bottom half handler.
596 */ 760 */
597int smp_call_function(void (*func)(void *info), void *info, 761static int smp_call_function_mask(void (*func)(void *info), void *info,
598 int nonatomic, int wait) 762 int nonatomic, int wait, cpumask_t mask)
599{ 763{
600 struct call_data_struct data; 764 struct call_data_struct data;
601 int cpus = num_online_cpus() - 1; 765 int cpus;
602 long timeout; 766 long timeout;
603 767
604 if (!cpus)
605 return 0;
606
607 /* Can deadlock when called with interrupts disabled */ 768 /* Can deadlock when called with interrupts disabled */
608 WARN_ON(irqs_disabled()); 769 WARN_ON(irqs_disabled());
609 770
@@ -614,9 +775,14 @@ int smp_call_function(void (*func)(void *info), void *info,
614 775
615 spin_lock(&call_lock); 776 spin_lock(&call_lock);
616 777
778 cpu_clear(smp_processor_id(), mask);
779 cpus = cpus_weight(mask);
780 if (!cpus)
781 goto out_unlock;
782
617 call_data = &data; 783 call_data = &data;
618 784
619 smp_cross_call(&xcall_call_function, 0, 0, 0); 785 smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);
620 786
621 /* 787 /*
622 * Wait for other cpus to complete function or at 788 * Wait for other cpus to complete function or at
@@ -630,18 +796,25 @@ int smp_call_function(void (*func)(void *info), void *info,
630 udelay(1); 796 udelay(1);
631 } 797 }
632 798
799out_unlock:
633 spin_unlock(&call_lock); 800 spin_unlock(&call_lock);
634 801
635 return 0; 802 return 0;
636 803
637out_timeout: 804out_timeout:
638 spin_unlock(&call_lock); 805 spin_unlock(&call_lock);
639 printk("XCALL: Remote cpus not responding, ncpus=%ld finished=%ld\n", 806 printk("XCALL: Remote cpus not responding, ncpus=%d finished=%d\n",
640 (long) num_online_cpus() - 1L, 807 cpus, atomic_read(&data.finished));
641 (long) atomic_read(&data.finished));
642 return 0; 808 return 0;
643} 809}
644 810
811int smp_call_function(void (*func)(void *info), void *info,
812 int nonatomic, int wait)
813{
814 return smp_call_function_mask(func, info, nonatomic, wait,
815 cpu_online_map);
816}
817
645void smp_call_function_client(int irq, struct pt_regs *regs) 818void smp_call_function_client(int irq, struct pt_regs *regs)
646{ 819{
647 void (*func) (void *info) = call_data->func; 820 void (*func) (void *info) = call_data->func;
@@ -659,13 +832,25 @@ void smp_call_function_client(int irq, struct pt_regs *regs)
659 } 832 }
660} 833}
661 834
835static void tsb_sync(void *info)
836{
837 struct mm_struct *mm = info;
838
839 if (current->active_mm == mm)
840 tsb_context_switch(mm);
841}
842
843void smp_tsb_sync(struct mm_struct *mm)
844{
845 smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask);
846}
847
662extern unsigned long xcall_flush_tlb_mm; 848extern unsigned long xcall_flush_tlb_mm;
663extern unsigned long xcall_flush_tlb_pending; 849extern unsigned long xcall_flush_tlb_pending;
664extern unsigned long xcall_flush_tlb_kernel_range; 850extern unsigned long xcall_flush_tlb_kernel_range;
665extern unsigned long xcall_flush_tlb_all_spitfire;
666extern unsigned long xcall_flush_tlb_all_cheetah;
667extern unsigned long xcall_report_regs; 851extern unsigned long xcall_report_regs;
668extern unsigned long xcall_receive_signal; 852extern unsigned long xcall_receive_signal;
853extern unsigned long xcall_new_mmu_context_version;
669 854
670#ifdef DCACHE_ALIASING_POSSIBLE 855#ifdef DCACHE_ALIASING_POSSIBLE
671extern unsigned long xcall_flush_dcache_page_cheetah; 856extern unsigned long xcall_flush_dcache_page_cheetah;
@@ -693,11 +878,17 @@ static __inline__ void __local_flush_dcache_page(struct page *page)
693void smp_flush_dcache_page_impl(struct page *page, int cpu) 878void smp_flush_dcache_page_impl(struct page *page, int cpu)
694{ 879{
695 cpumask_t mask = cpumask_of_cpu(cpu); 880 cpumask_t mask = cpumask_of_cpu(cpu);
696 int this_cpu = get_cpu(); 881 int this_cpu;
882
883 if (tlb_type == hypervisor)
884 return;
697 885
698#ifdef CONFIG_DEBUG_DCFLUSH 886#ifdef CONFIG_DEBUG_DCFLUSH
699 atomic_inc(&dcpage_flushes); 887 atomic_inc(&dcpage_flushes);
700#endif 888#endif
889
890 this_cpu = get_cpu();
891
701 if (cpu == this_cpu) { 892 if (cpu == this_cpu) {
702 __local_flush_dcache_page(page); 893 __local_flush_dcache_page(page);
703 } else if (cpu_online(cpu)) { 894 } else if (cpu_online(cpu)) {
@@ -713,7 +904,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
713 __pa(pg_addr), 904 __pa(pg_addr),
714 (u64) pg_addr, 905 (u64) pg_addr,
715 mask); 906 mask);
716 } else { 907 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
717#ifdef DCACHE_ALIASING_POSSIBLE 908#ifdef DCACHE_ALIASING_POSSIBLE
718 data0 = 909 data0 =
719 ((u64)&xcall_flush_dcache_page_cheetah); 910 ((u64)&xcall_flush_dcache_page_cheetah);
@@ -735,7 +926,12 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
735 void *pg_addr = page_address(page); 926 void *pg_addr = page_address(page);
736 cpumask_t mask = cpu_online_map; 927 cpumask_t mask = cpu_online_map;
737 u64 data0; 928 u64 data0;
738 int this_cpu = get_cpu(); 929 int this_cpu;
930
931 if (tlb_type == hypervisor)
932 return;
933
934 this_cpu = get_cpu();
739 935
740 cpu_clear(this_cpu, mask); 936 cpu_clear(this_cpu, mask);
741 937
@@ -752,7 +948,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
752 __pa(pg_addr), 948 __pa(pg_addr),
753 (u64) pg_addr, 949 (u64) pg_addr,
754 mask); 950 mask);
755 } else { 951 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
756#ifdef DCACHE_ALIASING_POSSIBLE 952#ifdef DCACHE_ALIASING_POSSIBLE
757 data0 = ((u64)&xcall_flush_dcache_page_cheetah); 953 data0 = ((u64)&xcall_flush_dcache_page_cheetah);
758 cheetah_xcall_deliver(data0, 954 cheetah_xcall_deliver(data0,
@@ -769,38 +965,58 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
769 put_cpu(); 965 put_cpu();
770} 966}
771 967
968static void __smp_receive_signal_mask(cpumask_t mask)
969{
970 smp_cross_call_masked(&xcall_receive_signal, 0, 0, 0, mask);
971}
972
772void smp_receive_signal(int cpu) 973void smp_receive_signal(int cpu)
773{ 974{
774 cpumask_t mask = cpumask_of_cpu(cpu); 975 cpumask_t mask = cpumask_of_cpu(cpu);
775 976
776 if (cpu_online(cpu)) { 977 if (cpu_online(cpu))
777 u64 data0 = (((u64)&xcall_receive_signal) & 0xffffffff); 978 __smp_receive_signal_mask(mask);
778
779 if (tlb_type == spitfire)
780 spitfire_xcall_deliver(data0, 0, 0, mask);
781 else
782 cheetah_xcall_deliver(data0, 0, 0, mask);
783 }
784} 979}
785 980
786void smp_receive_signal_client(int irq, struct pt_regs *regs) 981void smp_receive_signal_client(int irq, struct pt_regs *regs)
787{ 982{
788 /* Just return, rtrap takes care of the rest. */
789 clear_softint(1 << irq); 983 clear_softint(1 << irq);
790} 984}
791 985
792void smp_report_regs(void) 986void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
793{ 987{
794 smp_cross_call(&xcall_report_regs, 0, 0, 0); 988 struct mm_struct *mm;
989 unsigned long flags;
990
991 clear_softint(1 << irq);
992
993 /* See if we need to allocate a new TLB context because
994 * the version of the one we are using is now out of date.
995 */
996 mm = current->active_mm;
997 if (unlikely(!mm || (mm == &init_mm)))
998 return;
999
1000 spin_lock_irqsave(&mm->context.lock, flags);
1001
1002 if (unlikely(!CTX_VALID(mm->context)))
1003 get_new_mmu_context(mm);
1004
1005 spin_unlock_irqrestore(&mm->context.lock, flags);
1006
1007 load_secondary_context(mm);
1008 __flush_tlb_mm(CTX_HWBITS(mm->context),
1009 SECONDARY_CONTEXT);
795} 1010}
796 1011
797void smp_flush_tlb_all(void) 1012void smp_new_mmu_context_version(void)
798{ 1013{
799 if (tlb_type == spitfire) 1014 smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
800 smp_cross_call(&xcall_flush_tlb_all_spitfire, 0, 0, 0); 1015}
801 else 1016
802 smp_cross_call(&xcall_flush_tlb_all_cheetah, 0, 0, 0); 1017void smp_report_regs(void)
803 __flush_tlb_all(); 1018{
1019 smp_cross_call(&xcall_report_regs, 0, 0, 0);
804} 1020}
805 1021
806/* We know that the window frames of the user have been flushed 1022/* We know that the window frames of the user have been flushed
@@ -944,24 +1160,19 @@ void smp_release(void)
944 * can service tlb flush xcalls... 1160 * can service tlb flush xcalls...
945 */ 1161 */
946extern void prom_world(int); 1162extern void prom_world(int);
947extern void save_alternate_globals(unsigned long *); 1163
948extern void restore_alternate_globals(unsigned long *);
949void smp_penguin_jailcell(int irq, struct pt_regs *regs) 1164void smp_penguin_jailcell(int irq, struct pt_regs *regs)
950{ 1165{
951 unsigned long global_save[24];
952
953 clear_softint(1 << irq); 1166 clear_softint(1 << irq);
954 1167
955 preempt_disable(); 1168 preempt_disable();
956 1169
957 __asm__ __volatile__("flushw"); 1170 __asm__ __volatile__("flushw");
958 save_alternate_globals(global_save);
959 prom_world(1); 1171 prom_world(1);
960 atomic_inc(&smp_capture_registry); 1172 atomic_inc(&smp_capture_registry);
961 membar_storeload_storestore(); 1173 membar_storeload_storestore();
962 while (penguins_are_doing_time) 1174 while (penguins_are_doing_time)
963 rmb(); 1175 rmb();
964 restore_alternate_globals(global_save);
965 atomic_dec(&smp_capture_registry); 1176 atomic_dec(&smp_capture_registry);
966 prom_world(0); 1177 prom_world(0);
967 1178
@@ -1082,6 +1293,8 @@ int setup_profiling_timer(unsigned int multiplier)
1082/* Constrain the number of cpus to max_cpus. */ 1293/* Constrain the number of cpus to max_cpus. */
1083void __init smp_prepare_cpus(unsigned int max_cpus) 1294void __init smp_prepare_cpus(unsigned int max_cpus)
1084{ 1295{
1296 int i;
1297
1085 if (num_possible_cpus() > max_cpus) { 1298 if (num_possible_cpus() > max_cpus) {
1086 int instance, mid; 1299 int instance, mid;
1087 1300
@@ -1096,6 +1309,20 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
1096 } 1309 }
1097 } 1310 }
1098 1311
1312 for_each_cpu(i) {
1313 if (tlb_type == hypervisor) {
1314 int j;
1315
1316 /* XXX get this mapping from machine description */
1317 for_each_cpu(j) {
1318 if ((j >> 2) == (i >> 2))
1319 cpu_set(j, cpu_sibling_map[i]);
1320 }
1321 } else {
1322 cpu_set(i, cpu_sibling_map[i]);
1323 }
1324 }
1325
1099 smp_store_cpu_info(boot_cpu_id); 1326 smp_store_cpu_info(boot_cpu_id);
1100} 1327}
1101 1328
@@ -1117,12 +1344,15 @@ void __init smp_setup_cpu_possible_map(void)
1117 1344
1118void __devinit smp_prepare_boot_cpu(void) 1345void __devinit smp_prepare_boot_cpu(void)
1119{ 1346{
1120 if (hard_smp_processor_id() >= NR_CPUS) { 1347 int cpu = hard_smp_processor_id();
1348
1349 if (cpu >= NR_CPUS) {
1121 prom_printf("Serious problem, boot cpu id >= NR_CPUS\n"); 1350 prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
1122 prom_halt(); 1351 prom_halt();
1123 } 1352 }
1124 1353
1125 current_thread_info()->cpu = hard_smp_processor_id(); 1354 current_thread_info()->cpu = cpu;
1355 __local_per_cpu_offset = __per_cpu_offset(cpu);
1126 1356
1127 cpu_set(smp_processor_id(), cpu_online_map); 1357 cpu_set(smp_processor_id(), cpu_online_map);
1128 cpu_set(smp_processor_id(), phys_cpu_present_map); 1358 cpu_set(smp_processor_id(), phys_cpu_present_map);
@@ -1139,7 +1369,11 @@ int __devinit __cpu_up(unsigned int cpu)
1139 if (!cpu_isset(cpu, cpu_online_map)) { 1369 if (!cpu_isset(cpu, cpu_online_map)) {
1140 ret = -ENODEV; 1370 ret = -ENODEV;
1141 } else { 1371 } else {
1142 smp_synchronize_one_tick(cpu); 1372 /* On SUN4V, writes to %tick and %stick are
1373 * not allowed.
1374 */
1375 if (tlb_type != hypervisor)
1376 smp_synchronize_one_tick(cpu);
1143 } 1377 }
1144 } 1378 }
1145 return ret; 1379 return ret;
@@ -1183,12 +1417,9 @@ void __init setup_per_cpu_areas(void)
1183{ 1417{
1184 unsigned long goal, size, i; 1418 unsigned long goal, size, i;
1185 char *ptr; 1419 char *ptr;
1186 /* Created by linker magic */
1187 extern char __per_cpu_start[], __per_cpu_end[];
1188 1420
1189 /* Copy section for each CPU (we discard the original) */ 1421 /* Copy section for each CPU (we discard the original) */
1190 goal = ALIGN(__per_cpu_end - __per_cpu_start, PAGE_SIZE); 1422 goal = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
1191
1192#ifdef CONFIG_MODULES 1423#ifdef CONFIG_MODULES
1193 if (goal < PERCPU_ENOUGH_ROOM) 1424 if (goal < PERCPU_ENOUGH_ROOM)
1194 goal = PERCPU_ENOUGH_ROOM; 1425 goal = PERCPU_ENOUGH_ROOM;
@@ -1197,31 +1428,10 @@ void __init setup_per_cpu_areas(void)
1197 for (size = 1UL; size < goal; size <<= 1UL) 1428 for (size = 1UL; size < goal; size <<= 1UL)
1198 __per_cpu_shift++; 1429 __per_cpu_shift++;
1199 1430
1200 /* Make sure the resulting __per_cpu_base value 1431 ptr = alloc_bootmem(size * NR_CPUS);
1201 * will fit in the 43-bit sign extended IMMU
1202 * TSB register.
1203 */
1204 ptr = __alloc_bootmem(size * NR_CPUS, PAGE_SIZE,
1205 (unsigned long) __per_cpu_start);
1206 1432
1207 __per_cpu_base = ptr - __per_cpu_start; 1433 __per_cpu_base = ptr - __per_cpu_start;
1208 1434
1209 if ((__per_cpu_shift < PAGE_SHIFT) ||
1210 (__per_cpu_base & ~PAGE_MASK) ||
1211 (__per_cpu_base != (((long) __per_cpu_base << 20) >> 20))) {
1212 prom_printf("PER_CPU: Invalid layout, "
1213 "ptr[%p] shift[%lx] base[%lx]\n",
1214 ptr, __per_cpu_shift, __per_cpu_base);
1215 prom_halt();
1216 }
1217
1218 for (i = 0; i < NR_CPUS; i++, ptr += size) 1435 for (i = 0; i < NR_CPUS; i++, ptr += size)
1219 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); 1436 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
1220
1221 /* Finally, load in the boot cpu's base value.
1222 * We abuse the IMMU TSB register for trap handler
1223 * entry and exit loading of %g5. That is why it
1224 * has to be page aligned.
1225 */
1226 cpu_setup_percpu_base(hard_smp_processor_id());
1227} 1437}
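Note on the simplified setup_per_cpu_areas() above: the retained loop rounds the per-cpu image up to the next power of two while counting the shift, presumably so a CPU's per-cpu offset can later be formed from __per_cpu_base with a shift rather than a multiply. A small sketch of just that rounding step, with the base+shift use labelled as an assumption:

	/* Sketch: the power-of-two sizing kept in setup_per_cpu_areas().
	 * Only the rounding loop is taken from the patch; the base + (cpu << shift)
	 * use of the result is an assumption about __per_cpu_offset().
	 */
	#include <stdio.h>

	int main(void)
	{
		unsigned long goal = 23456;	/* e.g. aligned __per_cpu_end - __per_cpu_start */
		unsigned long size, shift = 0;

		for (size = 1UL; size < goal; size <<= 1UL)
			shift++;

		/* size is now the smallest power of two >= goal, shift = log2(size) */
		printf("size=%lu shift=%lu\n", size, shift);
		return 0;
	}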
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 3c06bfb92a8c..9914a17651b4 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -95,9 +95,6 @@ extern int __ashrdi3(int, int);
95 95
96extern int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs); 96extern int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs);
97 97
98extern unsigned long phys_base;
99extern unsigned long pfn_base;
100
101extern unsigned int sys_call_table[]; 98extern unsigned int sys_call_table[];
102 99
103extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *); 100extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
@@ -108,6 +105,14 @@ extern void xor_vis_4(unsigned long, unsigned long *, unsigned long *,
108extern void xor_vis_5(unsigned long, unsigned long *, unsigned long *, 105extern void xor_vis_5(unsigned long, unsigned long *, unsigned long *,
109 unsigned long *, unsigned long *, unsigned long *); 106 unsigned long *, unsigned long *, unsigned long *);
110 107
108extern void xor_niagara_2(unsigned long, unsigned long *, unsigned long *);
109extern void xor_niagara_3(unsigned long, unsigned long *, unsigned long *,
110 unsigned long *);
111extern void xor_niagara_4(unsigned long, unsigned long *, unsigned long *,
112 unsigned long *, unsigned long *);
113extern void xor_niagara_5(unsigned long, unsigned long *, unsigned long *,
114 unsigned long *, unsigned long *, unsigned long *);
115
111/* Per-CPU information table */ 116/* Per-CPU information table */
112EXPORT_PER_CPU_SYMBOL(__cpu_data); 117EXPORT_PER_CPU_SYMBOL(__cpu_data);
113 118
@@ -241,10 +246,6 @@ EXPORT_SYMBOL(verify_compat_iovec);
241#endif 246#endif
242 247
243EXPORT_SYMBOL(dump_fpu); 248EXPORT_SYMBOL(dump_fpu);
244EXPORT_SYMBOL(pte_alloc_one_kernel);
245#ifndef CONFIG_SMP
246EXPORT_SYMBOL(pgt_quicklists);
247#endif
248EXPORT_SYMBOL(put_fs_struct); 249EXPORT_SYMBOL(put_fs_struct);
249 250
250/* math-emu wants this */ 251/* math-emu wants this */
@@ -339,14 +340,10 @@ EXPORT_SYMBOL(copy_to_user_fixup);
339EXPORT_SYMBOL(copy_from_user_fixup); 340EXPORT_SYMBOL(copy_from_user_fixup);
340EXPORT_SYMBOL(copy_in_user_fixup); 341EXPORT_SYMBOL(copy_in_user_fixup);
341EXPORT_SYMBOL(__strncpy_from_user); 342EXPORT_SYMBOL(__strncpy_from_user);
342EXPORT_SYMBOL(__bzero_noasi); 343EXPORT_SYMBOL(__clear_user);
343 344
344/* Various address conversion macros use this. */ 345/* Various address conversion macros use this. */
345EXPORT_SYMBOL(phys_base);
346EXPORT_SYMBOL(pfn_base);
347EXPORT_SYMBOL(sparc64_valid_addr_bitmap); 346EXPORT_SYMBOL(sparc64_valid_addr_bitmap);
348EXPORT_SYMBOL(page_to_pfn);
349EXPORT_SYMBOL(pfn_to_page);
350 347
351/* No version information on this, heavily used in inline asm, 348/* No version information on this, heavily used in inline asm,
352 * and will always be 'void __ret_efault(void)'. 349 * and will always be 'void __ret_efault(void)'.
@@ -392,4 +389,9 @@ EXPORT_SYMBOL(xor_vis_3);
392EXPORT_SYMBOL(xor_vis_4); 389EXPORT_SYMBOL(xor_vis_4);
393EXPORT_SYMBOL(xor_vis_5); 390EXPORT_SYMBOL(xor_vis_5);
394 391
392EXPORT_SYMBOL(xor_niagara_2);
393EXPORT_SYMBOL(xor_niagara_3);
394EXPORT_SYMBOL(xor_niagara_4);
395EXPORT_SYMBOL(xor_niagara_5);
396
395EXPORT_SYMBOL(prom_palette); 397EXPORT_SYMBOL(prom_palette);
diff --git a/arch/sparc64/kernel/sun4v_ivec.S b/arch/sparc64/kernel/sun4v_ivec.S
new file mode 100644
index 000000000000..b49a68bdda43
--- /dev/null
+++ b/arch/sparc64/kernel/sun4v_ivec.S
@@ -0,0 +1,334 @@
1/* sun4v_ivec.S: Sun4v interrupt vector handling.
2 *
3 * Copyright (C) 2006 <davem@davemloft.net>
4 */
5
6#include <asm/cpudata.h>
7#include <asm/intr_queue.h>
8
9 .text
10 .align 32
11
12sun4v_cpu_mondo:
13 /* Head offset in %g2, tail offset in %g4.
14 * If they are the same, no work.
15 */
16 mov INTRQ_CPU_MONDO_HEAD, %g2
17 ldxa [%g2] ASI_QUEUE, %g2
18 mov INTRQ_CPU_MONDO_TAIL, %g4
19 ldxa [%g4] ASI_QUEUE, %g4
20 cmp %g2, %g4
21 be,pn %xcc, sun4v_cpu_mondo_queue_empty
22 nop
23
24 /* Get &trap_block[smp_processor_id()] into %g3. */
25 ldxa [%g0] ASI_SCRATCHPAD, %g3
26 sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3
27
28 /* Get CPU mondo queue base phys address into %g7. */
29 ldx [%g3 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
30
31 /* Now get the cross-call arguments and handler PC, same
32 * layout as sun4u:
33 *
34 * 1st 64-bit word: low half is 32-bit PC, put into %g3 and jmpl to it
35 * high half is context arg to MMU flushes, into %g5
36 * 2nd 64-bit word: 64-bit arg, load into %g1
37 * 3rd 64-bit word: 64-bit arg, load into %g7
38 */
39 ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g3
40 add %g2, 0x8, %g2
41 srlx %g3, 32, %g5
42 ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1
43 add %g2, 0x8, %g2
44 srl %g3, 0, %g3
45 ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g7
46 add %g2, 0x40 - 0x8 - 0x8, %g2
47
48 /* Update queue head pointer. */
49 sethi %hi(8192 - 1), %g4
50 or %g4, %lo(8192 - 1), %g4
51 and %g2, %g4, %g2
52
53 mov INTRQ_CPU_MONDO_HEAD, %g4
54 stxa %g2, [%g4] ASI_QUEUE
55 membar #Sync
56
57 jmpl %g3, %g0
58 nop
59
60sun4v_cpu_mondo_queue_empty:
61 retry
62
63sun4v_dev_mondo:
64 /* Head offset in %g2, tail offset in %g4. */
65 mov INTRQ_DEVICE_MONDO_HEAD, %g2
66 ldxa [%g2] ASI_QUEUE, %g2
67 mov INTRQ_DEVICE_MONDO_TAIL, %g4
68 ldxa [%g4] ASI_QUEUE, %g4
69 cmp %g2, %g4
70 be,pn %xcc, sun4v_dev_mondo_queue_empty
71 nop
72
73 /* Get &trap_block[smp_processor_id()] into %g3. */
74 ldxa [%g0] ASI_SCRATCHPAD, %g3
75 sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3
76
77 /* Get DEV mondo queue base phys address into %g5. */
78 ldx [%g3 + TRAP_PER_CPU_DEV_MONDO_PA], %g5
79
80 /* Load IVEC into %g3. */
81 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
82 add %g2, 0x40, %g2
83
84 /* XXX There can be a full 64-byte block of data here.
85 * XXX This is how we can get at MSI vector data.
 86 * XXX Currently we do not capture this, but when we do we'll
87 * XXX need to add a 64-byte storage area in the struct ino_bucket
88 * XXX or the struct irq_desc.
89 */
90
91 /* Update queue head pointer, this frees up some registers. */
92 sethi %hi(8192 - 1), %g4
93 or %g4, %lo(8192 - 1), %g4
94 and %g2, %g4, %g2
95
96 mov INTRQ_DEVICE_MONDO_HEAD, %g4
97 stxa %g2, [%g4] ASI_QUEUE
98 membar #Sync
99
100 /* Get &__irq_work[smp_processor_id()] into %g1. */
101 TRAP_LOAD_IRQ_WORK(%g1, %g4)
102
103 /* Get &ivector_table[IVEC] into %g4. */
104 sethi %hi(ivector_table), %g4
105 sllx %g3, 5, %g3
106 or %g4, %lo(ivector_table), %g4
107 add %g4, %g3, %g4
108
109 /* Load IRQ %pil into %g5. */
110 ldub [%g4 + 0x04], %g5
111
112 /* Insert ivector_table[] entry into __irq_work[] queue. */
113 sllx %g5, 2, %g3
114 lduw [%g1 + %g3], %g2 /* g2 = irq_work(cpu, pil) */
115 stw %g2, [%g4 + 0x00] /* bucket->irq_chain = g2 */
116 stw %g4, [%g1 + %g3] /* irq_work(cpu, pil) = bucket */
117
118 /* Signal the interrupt by setting (1 << pil) in %softint. */
119 mov 1, %g2
120 sllx %g2, %g5, %g2
121 wr %g2, 0x0, %set_softint
122
123sun4v_dev_mondo_queue_empty:
124 retry
125
126sun4v_res_mondo:
127 /* Head offset in %g2, tail offset in %g4. */
128 mov INTRQ_RESUM_MONDO_HEAD, %g2
129 ldxa [%g2] ASI_QUEUE, %g2
130 mov INTRQ_RESUM_MONDO_TAIL, %g4
131 ldxa [%g4] ASI_QUEUE, %g4
132 cmp %g2, %g4
133 be,pn %xcc, sun4v_res_mondo_queue_empty
134 nop
135
136 /* Get &trap_block[smp_processor_id()] into %g3. */
137 ldxa [%g0] ASI_SCRATCHPAD, %g3
138 sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3
139
140 /* Get RES mondo queue base phys address into %g5. */
141 ldx [%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5
142
143 /* Get RES kernel buffer base phys address into %g7. */
144 ldx [%g3 + TRAP_PER_CPU_RESUM_KBUF_PA], %g7
145
146 /* If the first word is non-zero, queue is full. */
147 ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1
148 brnz,pn %g1, sun4v_res_mondo_queue_full
149 nop
150
151 /* Remember this entry's offset in %g1. */
152 mov %g2, %g1
153
154 /* Copy 64-byte queue entry into kernel buffer. */
155 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
156 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
157 add %g2, 0x08, %g2
158 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
159 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
160 add %g2, 0x08, %g2
161 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
162 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
163 add %g2, 0x08, %g2
164 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
165 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
166 add %g2, 0x08, %g2
167 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
168 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
169 add %g2, 0x08, %g2
170 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
171 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
172 add %g2, 0x08, %g2
173 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
174 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
175 add %g2, 0x08, %g2
176 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
177 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
178 add %g2, 0x08, %g2
179
180 /* Update queue head pointer. */
181 sethi %hi(8192 - 1), %g4
182 or %g4, %lo(8192 - 1), %g4
183 and %g2, %g4, %g2
184
185 mov INTRQ_RESUM_MONDO_HEAD, %g4
186 stxa %g2, [%g4] ASI_QUEUE
187 membar #Sync
188
189 /* Disable interrupts and save register state so we can call
190 * C code. The etrap handling will leave %g4 in %l4 for us
191 * when it's done.
192 */
193 rdpr %pil, %g2
194 wrpr %g0, 15, %pil
195 mov %g1, %g4
196 ba,pt %xcc, etrap_irq
197 rd %pc, %g7
198
199 /* Log the event. */
200 add %sp, PTREGS_OFF, %o0
201 call sun4v_resum_error
202 mov %l4, %o1
203
204 /* Return from trap. */
205 ba,pt %xcc, rtrap_irq
206 nop
207
208sun4v_res_mondo_queue_empty:
209 retry
210
211sun4v_res_mondo_queue_full:
212 /* The queue is full, consolidate our damage by setting
213 * the head equal to the tail. We'll just trap again otherwise.
214 * Call C code to log the event.
215 */
216 mov INTRQ_RESUM_MONDO_HEAD, %g2
217 stxa %g4, [%g2] ASI_QUEUE
218 membar #Sync
219
220 rdpr %pil, %g2
221 wrpr %g0, 15, %pil
222 ba,pt %xcc, etrap_irq
223 rd %pc, %g7
224
225 call sun4v_resum_overflow
226 add %sp, PTREGS_OFF, %o0
227
228 ba,pt %xcc, rtrap_irq
229 nop
230
231sun4v_nonres_mondo:
232 /* Head offset in %g2, tail offset in %g4. */
233 mov INTRQ_NONRESUM_MONDO_HEAD, %g2
234 ldxa [%g2] ASI_QUEUE, %g2
235 mov INTRQ_NONRESUM_MONDO_TAIL, %g4
236 ldxa [%g4] ASI_QUEUE, %g4
237 cmp %g2, %g4
238 be,pn %xcc, sun4v_nonres_mondo_queue_empty
239 nop
240
241 /* Get &trap_block[smp_processor_id()] into %g3. */
242 ldxa [%g0] ASI_SCRATCHPAD, %g3
243 sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3
244
245 /* Get RES mondo queue base phys address into %g5. */
246 ldx [%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5
247
248 /* Get RES kernel buffer base phys address into %g7. */
249 ldx [%g3 + TRAP_PER_CPU_NONRESUM_KBUF_PA], %g7
250
251 /* If the first word is non-zero, queue is full. */
252 ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1
253 brnz,pn %g1, sun4v_nonres_mondo_queue_full
254 nop
255
256 /* Remember this entry's offset in %g1. */
257 mov %g2, %g1
258
259 /* Copy 64-byte queue entry into kernel buffer. */
260 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
261 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
262 add %g2, 0x08, %g2
263 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
264 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
265 add %g2, 0x08, %g2
266 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
267 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
268 add %g2, 0x08, %g2
269 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
270 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
271 add %g2, 0x08, %g2
272 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
273 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
274 add %g2, 0x08, %g2
275 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
276 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
277 add %g2, 0x08, %g2
278 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
279 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
280 add %g2, 0x08, %g2
281 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
282 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
283 add %g2, 0x08, %g2
284
285 /* Update queue head pointer. */
286 sethi %hi(8192 - 1), %g4
287 or %g4, %lo(8192 - 1), %g4
288 and %g2, %g4, %g2
289
290 mov INTRQ_NONRESUM_MONDO_HEAD, %g4
291 stxa %g2, [%g4] ASI_QUEUE
292 membar #Sync
293
294 /* Disable interrupts and save register state so we can call
295 * C code. The etrap handling will leave %g4 in %l4 for us
296 * when it's done.
297 */
298 rdpr %pil, %g2
299 wrpr %g0, 15, %pil
300 mov %g1, %g4
301 ba,pt %xcc, etrap_irq
302 rd %pc, %g7
303
304 /* Log the event. */
305 add %sp, PTREGS_OFF, %o0
306 call sun4v_nonresum_error
307 mov %l4, %o1
308
309 /* Return from trap. */
310 ba,pt %xcc, rtrap_irq
311 nop
312
313sun4v_nonres_mondo_queue_empty:
314 retry
315
316sun4v_nonres_mondo_queue_full:
317 /* The queue is full, consolidate our damage by setting
318 * the head equal to the tail. We'll just trap again otherwise.
319 * Call C code to log the event.
320 */
321 mov INTRQ_NONRESUM_MONDO_HEAD, %g2
322 stxa %g4, [%g2] ASI_QUEUE
323 membar #Sync
324
325 rdpr %pil, %g2
326 wrpr %g0, 15, %pil
327 ba,pt %xcc, etrap_irq
328 rd %pc, %g7
329
330 call sun4v_nonresum_overflow
331 add %sp, PTREGS_OFF, %o0
332
333 ba,pt %xcc, rtrap_irq
334 nop
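Note on the new sun4v_ivec.S handlers above: all four queues follow the same discipline, where head and tail are byte offsets into an 8 KB ring of 64-byte entries, head == tail means empty, and offsets wrap by masking with (8192 - 1). A short user-space sketch of that dequeue step, with the queue memory and head/tail registers simulated:

	#include <stdint.h>
	#include <string.h>
	#include <stdio.h>

	#define QUEUE_SIZE	8192
	#define ENTRY_SIZE	64

	static uint8_t queue[QUEUE_SIZE];
	static unsigned long head, tail;	/* byte offsets, as in the handlers */

	/* Returns 1 and copies one entry out if the queue was non-empty. */
	static int dequeue(uint8_t *entry)
	{
		if (head == tail)
			return 0;		/* queue empty: the handler just retries */
		memcpy(entry, &queue[head], ENTRY_SIZE);
		head = (head + ENTRY_SIZE) & (QUEUE_SIZE - 1);
		return 1;
	}

	int main(void)
	{
		uint8_t e[ENTRY_SIZE];

		tail = 2 * ENTRY_SIZE;		/* pretend two entries are pending */
		while (dequeue(e))
			puts("handled one mondo entry");
		return 0;
	}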
diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S
new file mode 100644
index 000000000000..ab23ddb7116e
--- /dev/null
+++ b/arch/sparc64/kernel/sun4v_tlb_miss.S
@@ -0,0 +1,421 @@
1/* sun4v_tlb_miss.S: Sun4v TLB miss handlers.
2 *
3 * Copyright (C) 2006 <davem@davemloft.net>
4 */
5
6 .text
7 .align 32
8
9 /* Load ITLB fault information into VADDR and CTX, using BASE. */
10#define LOAD_ITLB_INFO(BASE, VADDR, CTX) \
11 ldx [BASE + HV_FAULT_I_ADDR_OFFSET], VADDR; \
12 ldx [BASE + HV_FAULT_I_CTX_OFFSET], CTX;
13
14 /* Load DTLB fault information into VADDR and CTX, using BASE. */
15#define LOAD_DTLB_INFO(BASE, VADDR, CTX) \
16 ldx [BASE + HV_FAULT_D_ADDR_OFFSET], VADDR; \
17 ldx [BASE + HV_FAULT_D_CTX_OFFSET], CTX;
18
19 /* DEST = (VADDR >> 22)
20 *
21 * Branch to ZERO_CTX_LABEL if context is zero.
22 */
23#define COMPUTE_TAG_TARGET(DEST, VADDR, CTX, ZERO_CTX_LABEL) \
24 srlx VADDR, 22, DEST; \
25 brz,pn CTX, ZERO_CTX_LABEL; \
26 nop;
27
28 /* Create TSB pointer. This is something like:
29 *
30 * index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL;
31 * tsb_base = tsb_reg & ~0x7UL;
 32 * tsb_index = ((vaddr >> PAGE_SHIFT) & index_mask);
33 * tsb_ptr = tsb_base + (tsb_index * 16);
34 */
35#define COMPUTE_TSB_PTR(TSB_PTR, VADDR, TMP1, TMP2) \
36 and TSB_PTR, 0x7, TMP1; \
37 mov 512, TMP2; \
38 andn TSB_PTR, 0x7, TSB_PTR; \
39 sllx TMP2, TMP1, TMP2; \
40 srlx VADDR, PAGE_SHIFT, TMP1; \
41 sub TMP2, 1, TMP2; \
42 and TMP1, TMP2, TMP1; \
43 sllx TMP1, 4, TMP1; \
44 add TSB_PTR, TMP1, TSB_PTR;
45
46sun4v_itlb_miss:
47 /* Load MMU Miss base into %g2. */
48 ldxa [%g0] ASI_SCRATCHPAD, %g2
49
50 /* Load UTSB reg into %g1. */
51 mov SCRATCHPAD_UTSBREG1, %g1
52 ldxa [%g1] ASI_SCRATCHPAD, %g1
53
54 LOAD_ITLB_INFO(%g2, %g4, %g5)
55 COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_itlb_4v)
56 COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7)
57
58 /* Load TSB tag/pte into %g2/%g3 and compare the tag. */
59 ldda [%g1] ASI_QUAD_LDD_PHYS_4V, %g2
60 cmp %g2, %g6
61 bne,a,pn %xcc, tsb_miss_page_table_walk
62 mov FAULT_CODE_ITLB, %g3
63 andcc %g3, _PAGE_EXEC_4V, %g0
64 be,a,pn %xcc, tsb_do_fault
65 mov FAULT_CODE_ITLB, %g3
66
67 /* We have a valid entry, make hypervisor call to load
68 * I-TLB and return from trap.
69 *
70 * %g3: PTE
71 * %g4: vaddr
72 */
73sun4v_itlb_load:
74 ldxa [%g0] ASI_SCRATCHPAD, %g6
75 mov %o0, %g1 ! save %o0
76 mov %o1, %g2 ! save %o1
77 mov %o2, %g5 ! save %o2
78 mov %o3, %g7 ! save %o3
79 mov %g4, %o0 ! vaddr
80 ldx [%g6 + HV_FAULT_I_CTX_OFFSET], %o1 ! ctx
81 mov %g3, %o2 ! PTE
82 mov HV_MMU_IMMU, %o3 ! flags
83 ta HV_MMU_MAP_ADDR_TRAP
84 brnz,pn %o0, sun4v_itlb_error
85 mov %g2, %o1 ! restore %o1
86 mov %g1, %o0 ! restore %o0
87 mov %g5, %o2 ! restore %o2
88 mov %g7, %o3 ! restore %o3
89
90 retry
91
92sun4v_dtlb_miss:
93 /* Load MMU Miss base into %g2. */
94 ldxa [%g0] ASI_SCRATCHPAD, %g2
95
96 /* Load UTSB reg into %g1. */
97 mov SCRATCHPAD_UTSBREG1, %g1
98 ldxa [%g1] ASI_SCRATCHPAD, %g1
99
100 LOAD_DTLB_INFO(%g2, %g4, %g5)
101 COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_dtlb_4v)
102 COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7)
103
104 /* Load TSB tag/pte into %g2/%g3 and compare the tag. */
105 ldda [%g1] ASI_QUAD_LDD_PHYS_4V, %g2
106 cmp %g2, %g6
107 bne,a,pn %xcc, tsb_miss_page_table_walk
108 mov FAULT_CODE_DTLB, %g3
109
110 /* We have a valid entry, make hypervisor call to load
111 * D-TLB and return from trap.
112 *
113 * %g3: PTE
114 * %g4: vaddr
115 */
116sun4v_dtlb_load:
117 ldxa [%g0] ASI_SCRATCHPAD, %g6
118 mov %o0, %g1 ! save %o0
119 mov %o1, %g2 ! save %o1
120 mov %o2, %g5 ! save %o2
121 mov %o3, %g7 ! save %o3
122 mov %g4, %o0 ! vaddr
123 ldx [%g6 + HV_FAULT_D_CTX_OFFSET], %o1 ! ctx
124 mov %g3, %o2 ! PTE
125 mov HV_MMU_DMMU, %o3 ! flags
126 ta HV_MMU_MAP_ADDR_TRAP
127 brnz,pn %o0, sun4v_dtlb_error
128 mov %g2, %o1 ! restore %o1
129 mov %g1, %o0 ! restore %o0
130 mov %g5, %o2 ! restore %o2
131 mov %g7, %o3 ! restore %o3
132
133 retry
134
135sun4v_dtlb_prot:
136 SET_GL(1)
137
138 /* Load MMU Miss base into %g5. */
139 ldxa [%g0] ASI_SCRATCHPAD, %g5
140
141 ldx [%g5 + HV_FAULT_D_ADDR_OFFSET], %g5
142 rdpr %tl, %g1
143 cmp %g1, 1
144 bgu,pn %xcc, winfix_trampoline
145 nop
146 ba,pt %xcc, sparc64_realfault_common
147 mov FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4
148
149 /* Called from trap table:
150 * %g4: vaddr
151 * %g5: context
152 * %g6: TAG TARGET
153 */
154sun4v_itsb_miss:
155 mov SCRATCHPAD_UTSBREG1, %g1
156 ldxa [%g1] ASI_SCRATCHPAD, %g1
157 brz,pn %g5, kvmap_itlb_4v
158 mov FAULT_CODE_ITLB, %g3
159 ba,a,pt %xcc, sun4v_tsb_miss_common
160
161 /* Called from trap table:
162 * %g4: vaddr
163 * %g5: context
164 * %g6: TAG TARGET
165 */
166sun4v_dtsb_miss:
167 mov SCRATCHPAD_UTSBREG1, %g1
168 ldxa [%g1] ASI_SCRATCHPAD, %g1
169 brz,pn %g5, kvmap_dtlb_4v
170 mov FAULT_CODE_DTLB, %g3
171
172 /* fallthrough */
173
174 /* Create TSB pointer into %g1. This is something like:
175 *
176 * index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL;
177 * tsb_base = tsb_reg & ~0x7UL;
 178 * tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_index_mask = ((vaddr >> PAGE_SHIFT) & index_mask);
179 * tsb_ptr = tsb_base + (tsb_index * 16);
180 */
181sun4v_tsb_miss_common:
182 COMPUTE_TSB_PTR(%g1, %g4, %g5, %g7)
183
184 /* Branch directly to page table lookup. We have SCRATCHPAD_MMU_MISS
185 * still in %g2, so it's quite trivial to get at the PGD PHYS value
186 * so we can preload it into %g7.
187 */
188 sub %g2, TRAP_PER_CPU_FAULT_INFO, %g2
189 ba,pt %xcc, tsb_miss_page_table_walk_sun4v_fastpath
190 ldx [%g2 + TRAP_PER_CPU_PGD_PADDR], %g7
191
192sun4v_itlb_error:
193 sethi %hi(sun4v_err_itlb_vaddr), %g1
194 stx %g4, [%g1 + %lo(sun4v_err_itlb_vaddr)]
195 sethi %hi(sun4v_err_itlb_ctx), %g1
196 ldxa [%g0] ASI_SCRATCHPAD, %g6
197 ldx [%g6 + HV_FAULT_I_CTX_OFFSET], %o1
198 stx %o1, [%g1 + %lo(sun4v_err_itlb_ctx)]
199 sethi %hi(sun4v_err_itlb_pte), %g1
200 stx %g3, [%g1 + %lo(sun4v_err_itlb_pte)]
201 sethi %hi(sun4v_err_itlb_error), %g1
202 stx %o0, [%g1 + %lo(sun4v_err_itlb_error)]
203
204 rdpr %tl, %g4
205 cmp %g4, 1
206 ble,pt %icc, 1f
207 sethi %hi(2f), %g7
208 ba,pt %xcc, etraptl1
209 or %g7, %lo(2f), %g7
210
2111: ba,pt %xcc, etrap
2122: or %g7, %lo(2b), %g7
213 call sun4v_itlb_error_report
214 add %sp, PTREGS_OFF, %o0
215
216 /* NOTREACHED */
217
218sun4v_dtlb_error:
219 sethi %hi(sun4v_err_dtlb_vaddr), %g1
220 stx %g4, [%g1 + %lo(sun4v_err_dtlb_vaddr)]
221 sethi %hi(sun4v_err_dtlb_ctx), %g1
222 ldxa [%g0] ASI_SCRATCHPAD, %g6
223 ldx [%g6 + HV_FAULT_D_CTX_OFFSET], %o1
224 stx %o1, [%g1 + %lo(sun4v_err_dtlb_ctx)]
225 sethi %hi(sun4v_err_dtlb_pte), %g1
226 stx %g3, [%g1 + %lo(sun4v_err_dtlb_pte)]
227 sethi %hi(sun4v_err_dtlb_error), %g1
228 stx %o0, [%g1 + %lo(sun4v_err_dtlb_error)]
229
230 rdpr %tl, %g4
231 cmp %g4, 1
232 ble,pt %icc, 1f
233 sethi %hi(2f), %g7
234 ba,pt %xcc, etraptl1
235 or %g7, %lo(2f), %g7
236
2371: ba,pt %xcc, etrap
2382: or %g7, %lo(2b), %g7
239 call sun4v_dtlb_error_report
240 add %sp, PTREGS_OFF, %o0
241
242 /* NOTREACHED */
243
244 /* Instruction Access Exception, tl0. */
245sun4v_iacc:
246 ldxa [%g0] ASI_SCRATCHPAD, %g2
247 ldx [%g2 + HV_FAULT_I_TYPE_OFFSET], %g3
248 ldx [%g2 + HV_FAULT_I_ADDR_OFFSET], %g4
249 ldx [%g2 + HV_FAULT_I_CTX_OFFSET], %g5
250 sllx %g3, 16, %g3
251 or %g5, %g3, %g5
252 ba,pt %xcc, etrap
253 rd %pc, %g7
254 mov %l4, %o1
255 mov %l5, %o2
256 call sun4v_insn_access_exception
257 add %sp, PTREGS_OFF, %o0
258 ba,a,pt %xcc, rtrap_clr_l6
259
260 /* Instruction Access Exception, tl1. */
261sun4v_iacc_tl1:
262 ldxa [%g0] ASI_SCRATCHPAD, %g2
263 ldx [%g2 + HV_FAULT_I_TYPE_OFFSET], %g3
264 ldx [%g2 + HV_FAULT_I_ADDR_OFFSET], %g4
265 ldx [%g2 + HV_FAULT_I_CTX_OFFSET], %g5
266 sllx %g3, 16, %g3
267 or %g5, %g3, %g5
268 ba,pt %xcc, etraptl1
269 rd %pc, %g7
270 mov %l4, %o1
271 mov %l5, %o2
272 call sun4v_insn_access_exception_tl1
273 add %sp, PTREGS_OFF, %o0
274 ba,a,pt %xcc, rtrap_clr_l6
275
276 /* Data Access Exception, tl0. */
277sun4v_dacc:
278 ldxa [%g0] ASI_SCRATCHPAD, %g2
279 ldx [%g2 + HV_FAULT_D_TYPE_OFFSET], %g3
280 ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4
281 ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5
282 sllx %g3, 16, %g3
283 or %g5, %g3, %g5
284 ba,pt %xcc, etrap
285 rd %pc, %g7
286 mov %l4, %o1
287 mov %l5, %o2
288 call sun4v_data_access_exception
289 add %sp, PTREGS_OFF, %o0
290 ba,a,pt %xcc, rtrap_clr_l6
291
292 /* Data Access Exception, tl1. */
293sun4v_dacc_tl1:
294 ldxa [%g0] ASI_SCRATCHPAD, %g2
295 ldx [%g2 + HV_FAULT_D_TYPE_OFFSET], %g3
296 ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4
297 ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5
298 sllx %g3, 16, %g3
299 or %g5, %g3, %g5
300 ba,pt %xcc, etraptl1
301 rd %pc, %g7
302 mov %l4, %o1
303 mov %l5, %o2
304 call sun4v_data_access_exception_tl1
305 add %sp, PTREGS_OFF, %o0
306 ba,a,pt %xcc, rtrap_clr_l6
307
308 /* Memory Address Unaligned. */
309sun4v_mna:
310 /* Window fixup? */
311 rdpr %tl, %g2
312 cmp %g2, 1
313 ble,pt %icc, 1f
314 nop
315
316 SET_GL(1)
317 ldxa [%g0] ASI_SCRATCHPAD, %g2
318 ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g5
319 mov HV_FAULT_TYPE_UNALIGNED, %g3
320 ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g4
321 sllx %g3, 16, %g3
322 or %g4, %g3, %g4
323 ba,pt %xcc, winfix_mna
324 rdpr %tpc, %g3
325 /* not reached */
326
3271: ldxa [%g0] ASI_SCRATCHPAD, %g2
328 mov HV_FAULT_TYPE_UNALIGNED, %g3
329 ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4
330 ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5
331 sllx %g3, 16, %g3
332 or %g5, %g3, %g5
333
334 ba,pt %xcc, etrap
335 rd %pc, %g7
336 mov %l4, %o1
337 mov %l5, %o2
338 call sun4v_do_mna
339 add %sp, PTREGS_OFF, %o0
340 ba,a,pt %xcc, rtrap_clr_l6
341
342 /* Privileged Action. */
343sun4v_privact:
344 ba,pt %xcc, etrap
345 rd %pc, %g7
346 call do_privact
347 add %sp, PTREGS_OFF, %o0
348 ba,a,pt %xcc, rtrap_clr_l6
349
350 /* Unaligned ldd float, tl0. */
351sun4v_lddfmna:
352 ldxa [%g0] ASI_SCRATCHPAD, %g2
353 ldx [%g2 + HV_FAULT_D_TYPE_OFFSET], %g3
354 ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4
355 ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5
356 sllx %g3, 16, %g3
357 or %g5, %g3, %g5
358 ba,pt %xcc, etrap
359 rd %pc, %g7
360 mov %l4, %o1
361 mov %l5, %o2
362 call handle_lddfmna
363 add %sp, PTREGS_OFF, %o0
364 ba,a,pt %xcc, rtrap_clr_l6
365
366 /* Unaligned std float, tl0. */
367sun4v_stdfmna:
368 ldxa [%g0] ASI_SCRATCHPAD, %g2
369 ldx [%g2 + HV_FAULT_D_TYPE_OFFSET], %g3
370 ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4
371 ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5
372 sllx %g3, 16, %g3
373 or %g5, %g3, %g5
374 ba,pt %xcc, etrap
375 rd %pc, %g7
376 mov %l4, %o1
377 mov %l5, %o2
378 call handle_stdfmna
379 add %sp, PTREGS_OFF, %o0
380 ba,a,pt %xcc, rtrap_clr_l6
381
382#define BRANCH_ALWAYS 0x10680000
383#define NOP 0x01000000
384#define SUN4V_DO_PATCH(OLD, NEW) \
385 sethi %hi(NEW), %g1; \
386 or %g1, %lo(NEW), %g1; \
387 sethi %hi(OLD), %g2; \
388 or %g2, %lo(OLD), %g2; \
389 sub %g1, %g2, %g1; \
390 sethi %hi(BRANCH_ALWAYS), %g3; \
391 sll %g1, 11, %g1; \
392 srl %g1, 11 + 2, %g1; \
393 or %g3, %lo(BRANCH_ALWAYS), %g3; \
394 or %g3, %g1, %g3; \
395 stw %g3, [%g2]; \
396 sethi %hi(NOP), %g3; \
397 or %g3, %lo(NOP), %g3; \
398 stw %g3, [%g2 + 0x4]; \
399 flush %g2;
400
401 .globl sun4v_patch_tlb_handlers
402 .type sun4v_patch_tlb_handlers,#function
403sun4v_patch_tlb_handlers:
404 SUN4V_DO_PATCH(tl0_iamiss, sun4v_itlb_miss)
405 SUN4V_DO_PATCH(tl1_iamiss, sun4v_itlb_miss)
406 SUN4V_DO_PATCH(tl0_damiss, sun4v_dtlb_miss)
407 SUN4V_DO_PATCH(tl1_damiss, sun4v_dtlb_miss)
408 SUN4V_DO_PATCH(tl0_daprot, sun4v_dtlb_prot)
409 SUN4V_DO_PATCH(tl1_daprot, sun4v_dtlb_prot)
410 SUN4V_DO_PATCH(tl0_iax, sun4v_iacc)
411 SUN4V_DO_PATCH(tl1_iax, sun4v_iacc_tl1)
412 SUN4V_DO_PATCH(tl0_dax, sun4v_dacc)
413 SUN4V_DO_PATCH(tl1_dax, sun4v_dacc_tl1)
414 SUN4V_DO_PATCH(tl0_mna, sun4v_mna)
415 SUN4V_DO_PATCH(tl1_mna, sun4v_mna)
416 SUN4V_DO_PATCH(tl0_lddfmna, sun4v_lddfmna)
417 SUN4V_DO_PATCH(tl0_stdfmna, sun4v_stdfmna)
418 SUN4V_DO_PATCH(tl0_privact, sun4v_privact)
419 retl
420 nop
421 .size sun4v_patch_tlb_handlers,.-sun4v_patch_tlb_handlers
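Note on the new sun4v_tlb_miss.S above: the COMPUTE_TSB_PTR macro packs the TSB size code into the low three bits of the TSB register, exactly as its pseudocode comment describes. The same computation restated in plain C, assuming the 8 KB base page (PAGE_SHIFT == 13):

	#include <stdio.h>

	#define PAGE_SHIFT 13UL		/* assumed 8 KB base page */

	static unsigned long compute_tsb_ptr(unsigned long tsb_reg, unsigned long vaddr)
	{
		unsigned long index_mask = (512UL << (tsb_reg & 0x7UL)) - 1UL;
		unsigned long tsb_base   = tsb_reg & ~0x7UL;
		unsigned long tsb_index  = (vaddr >> PAGE_SHIFT) & index_mask;

		return tsb_base + tsb_index * 16UL;	/* 16-byte TSB entries */
	}

	int main(void)
	{
		/* size code 0 => 512 entries; example values only */
		printf("%lx\n", compute_tsb_ptr(0x100000UL, 0x7f00002000UL));
		return 0;
	}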
diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c
index 5f8c822a2b4a..7a869138c37f 100644
--- a/arch/sparc64/kernel/sys_sparc.c
+++ b/arch/sparc64/kernel/sys_sparc.c
@@ -25,25 +25,93 @@
25#include <linux/syscalls.h> 25#include <linux/syscalls.h>
26#include <linux/ipc.h> 26#include <linux/ipc.h>
27#include <linux/personality.h> 27#include <linux/personality.h>
28#include <linux/random.h>
28 29
29#include <asm/uaccess.h> 30#include <asm/uaccess.h>
30#include <asm/ipc.h> 31#include <asm/ipc.h>
31#include <asm/utrap.h> 32#include <asm/utrap.h>
32#include <asm/perfctr.h> 33#include <asm/perfctr.h>
34#include <asm/a.out.h>
33 35
34/* #define DEBUG_UNIMP_SYSCALL */ 36/* #define DEBUG_UNIMP_SYSCALL */
35 37
36/* XXX Make this per-binary type, this way we can detect the type of
37 * XXX a binary. Every Sparc executable calls this very early on.
38 */
39asmlinkage unsigned long sys_getpagesize(void) 38asmlinkage unsigned long sys_getpagesize(void)
40{ 39{
41 return PAGE_SIZE; 40 return PAGE_SIZE;
42} 41}
43 42
44#define COLOUR_ALIGN(addr,pgoff) \ 43#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
45 ((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \ 44#define VA_EXCLUDE_END (0xfffff80000000000UL + (1UL << 32UL))
46 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1))) 45
46/* Does addr --> addr+len fall within 4GB of the VA-space hole or
47 * overflow past the end of the 64-bit address space?
48 */
49static inline int invalid_64bit_range(unsigned long addr, unsigned long len)
50{
51 unsigned long va_exclude_start, va_exclude_end;
52
53 va_exclude_start = VA_EXCLUDE_START;
54 va_exclude_end = VA_EXCLUDE_END;
55
56 if (unlikely(len >= va_exclude_start))
57 return 1;
58
59 if (unlikely((addr + len) < addr))
60 return 1;
61
62 if (unlikely((addr >= va_exclude_start && addr < va_exclude_end) ||
63 ((addr + len) >= va_exclude_start &&
64 (addr + len) < va_exclude_end)))
65 return 1;
66
67 return 0;
68}
69
70/* Does start,end straddle the VA-space hole? */
71static inline int straddles_64bit_va_hole(unsigned long start, unsigned long end)
72{
73 unsigned long va_exclude_start, va_exclude_end;
74
75 va_exclude_start = VA_EXCLUDE_START;
76 va_exclude_end = VA_EXCLUDE_END;
77
78 if (likely(start < va_exclude_start && end < va_exclude_start))
79 return 0;
80
81 if (likely(start >= va_exclude_end && end >= va_exclude_end))
82 return 0;
83
84 return 1;
85}
86
87/* These functions differ from the default implementations in
88 * mm/mmap.c in two ways:
89 *
90 * 1) For file backed MAP_SHARED mmap()'s we D-cache color align,
91 * for fixed such mappings we just validate what the user gave us.
92 * 2) For 64-bit tasks we avoid mapping anything within 4GB of
93 * the spitfire/niagara VA-hole.
94 */
95
96static inline unsigned long COLOUR_ALIGN(unsigned long addr,
97 unsigned long pgoff)
98{
99 unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1);
100 unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);
101
102 return base + off;
103}
104
105static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
106 unsigned long pgoff)
107{
108 unsigned long base = addr & ~(SHMLBA-1);
109 unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);
110
111 if (base + off <= addr)
112 return base + off;
113 return base - off;
114}
47 115
48unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) 116unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
49{ 117{
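Note on the VA-hole helpers added above: invalid_64bit_range() rejects any 64-bit mapping that would land within 4 GB of either edge of the sparc64 VA hole, or that wraps past the end of the address space. A stand-alone harness exercising the same predicate; the constants and logic are copied from the patch (with the likely/unlikely hints dropped), only the test harness is new.

	#include <stdio.h>

	#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
	#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))

	static int invalid_64bit_range(unsigned long addr, unsigned long len)
	{
		if (len >= VA_EXCLUDE_START)
			return 1;
		if (addr + len < addr)
			return 1;		/* wraps past the top of the address space */
		if ((addr >= VA_EXCLUDE_START && addr < VA_EXCLUDE_END) ||
		    (addr + len >= VA_EXCLUDE_START && addr + len < VA_EXCLUDE_END))
			return 1;
		return 0;
	}

	int main(void)
	{
		/* well below the hole: accepted (prints 0) */
		printf("%d\n", invalid_64bit_range(0x10000UL, 0x100000UL));
		/* ends inside the excluded window below the hole: rejected (prints 1) */
		printf("%d\n", invalid_64bit_range(VA_EXCLUDE_START - 0x1000UL, 0x2000UL));
		return 0;
	}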
@@ -64,8 +132,8 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
64 } 132 }
65 133
66 if (test_thread_flag(TIF_32BIT)) 134 if (test_thread_flag(TIF_32BIT))
67 task_size = 0xf0000000UL; 135 task_size = STACK_TOP32;
68 if (len > task_size || len > -PAGE_OFFSET) 136 if (unlikely(len > task_size || len >= VA_EXCLUDE_START))
69 return -ENOMEM; 137 return -ENOMEM;
70 138
71 do_color_align = 0; 139 do_color_align = 0;
@@ -84,11 +152,12 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
84 return addr; 152 return addr;
85 } 153 }
86 154
87 if (len <= mm->cached_hole_size) { 155 if (len > mm->cached_hole_size) {
156 start_addr = addr = mm->free_area_cache;
157 } else {
158 start_addr = addr = TASK_UNMAPPED_BASE;
88 mm->cached_hole_size = 0; 159 mm->cached_hole_size = 0;
89 mm->free_area_cache = TASK_UNMAPPED_BASE;
90 } 160 }
91 start_addr = addr = mm->free_area_cache;
92 161
93 task_size -= len; 162 task_size -= len;
94 163
@@ -100,11 +169,12 @@ full_search:
100 169
101 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { 170 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
102 /* At this point: (!vma || addr < vma->vm_end). */ 171 /* At this point: (!vma || addr < vma->vm_end). */
103 if (addr < PAGE_OFFSET && -PAGE_OFFSET - len < addr) { 172 if (addr < VA_EXCLUDE_START &&
104 addr = PAGE_OFFSET; 173 (addr + len) >= VA_EXCLUDE_START) {
105 vma = find_vma(mm, PAGE_OFFSET); 174 addr = VA_EXCLUDE_END;
175 vma = find_vma(mm, VA_EXCLUDE_END);
106 } 176 }
107 if (task_size < addr) { 177 if (unlikely(task_size < addr)) {
108 if (start_addr != TASK_UNMAPPED_BASE) { 178 if (start_addr != TASK_UNMAPPED_BASE) {
109 start_addr = addr = TASK_UNMAPPED_BASE; 179 start_addr = addr = TASK_UNMAPPED_BASE;
110 mm->cached_hole_size = 0; 180 mm->cached_hole_size = 0;
@@ -112,7 +182,7 @@ full_search:
112 } 182 }
113 return -ENOMEM; 183 return -ENOMEM;
114 } 184 }
115 if (!vma || addr + len <= vma->vm_start) { 185 if (likely(!vma || addr + len <= vma->vm_start)) {
116 /* 186 /*
117 * Remember the place where we stopped the search: 187 * Remember the place where we stopped the search:
118 */ 188 */
@@ -128,6 +198,121 @@ full_search:
128 } 198 }
129} 199}
130 200
201unsigned long
202arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
203 const unsigned long len, const unsigned long pgoff,
204 const unsigned long flags)
205{
206 struct vm_area_struct *vma;
207 struct mm_struct *mm = current->mm;
208 unsigned long task_size = STACK_TOP32;
209 unsigned long addr = addr0;
210 int do_color_align;
211
212 /* This should only ever run for 32-bit processes. */
213 BUG_ON(!test_thread_flag(TIF_32BIT));
214
215 if (flags & MAP_FIXED) {
216 /* We do not accept a shared mapping if it would violate
217 * cache aliasing constraints.
218 */
219 if ((flags & MAP_SHARED) &&
220 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
221 return -EINVAL;
222 return addr;
223 }
224
225 if (unlikely(len > task_size))
226 return -ENOMEM;
227
228 do_color_align = 0;
229 if (filp || (flags & MAP_SHARED))
230 do_color_align = 1;
231
232 /* requesting a specific address */
233 if (addr) {
234 if (do_color_align)
235 addr = COLOUR_ALIGN(addr, pgoff);
236 else
237 addr = PAGE_ALIGN(addr);
238
239 vma = find_vma(mm, addr);
240 if (task_size - len >= addr &&
241 (!vma || addr + len <= vma->vm_start))
242 return addr;
243 }
244
245 /* check if free_area_cache is useful for us */
246 if (len <= mm->cached_hole_size) {
247 mm->cached_hole_size = 0;
248 mm->free_area_cache = mm->mmap_base;
249 }
250
251 /* either no address requested or can't fit in requested address hole */
252 addr = mm->free_area_cache;
253 if (do_color_align) {
254 unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);
255
256 addr = base + len;
257 }
258
259 /* make sure it can fit in the remaining address space */
260 if (likely(addr > len)) {
261 vma = find_vma(mm, addr-len);
262 if (!vma || addr <= vma->vm_start) {
263 /* remember the address as a hint for next time */
264 return (mm->free_area_cache = addr-len);
265 }
266 }
267
268 if (unlikely(mm->mmap_base < len))
269 goto bottomup;
270
271 addr = mm->mmap_base-len;
272 if (do_color_align)
273 addr = COLOUR_ALIGN_DOWN(addr, pgoff);
274
275 do {
276 /*
277 * Lookup failure means no vma is above this address,
278 * else if new region fits below vma->vm_start,
279 * return with success:
280 */
281 vma = find_vma(mm, addr);
282 if (likely(!vma || addr+len <= vma->vm_start)) {
283 /* remember the address as a hint for next time */
284 return (mm->free_area_cache = addr);
285 }
286
287 /* remember the largest hole we saw so far */
288 if (addr + mm->cached_hole_size < vma->vm_start)
289 mm->cached_hole_size = vma->vm_start - addr;
290
291 /* try just below the current vma->vm_start */
292 addr = vma->vm_start-len;
293 if (do_color_align)
294 addr = COLOUR_ALIGN_DOWN(addr, pgoff);
295 } while (likely(len < vma->vm_start));
296
297bottomup:
298 /*
299 * A failed mmap() very likely causes application failure,
300 * so fall back to the bottom-up function here. This scenario
301 * can happen with large stack limits and large mmap()
302 * allocations.
303 */
304 mm->cached_hole_size = ~0UL;
305 mm->free_area_cache = TASK_UNMAPPED_BASE;
306 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
307 /*
308 * Restore the topdown base:
309 */
310 mm->free_area_cache = mm->mmap_base;
311 mm->cached_hole_size = ~0UL;
312
313 return addr;
314}
315
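Both search paths above call COLOUR_ALIGN()/COLOUR_ALIGN_DOWN(), which are defined earlier in sys_sparc.c and are not visible in this hunk. They round a candidate address up or down to the nearest address with the right cache colour for the requested file offset; the MAP_FIXED test above spells out the invariant they preserve. A sketch of that invariant (cache_colour_ok() is an illustrative name, not part of the patch):

	/* A shared mapping at addr for file page pgoff must keep
	 * (addr - (pgoff << PAGE_SHIFT)) a multiple of SHMLBA so that
	 * every alias of the same page lands in the same cache colour.
	 */
	static inline int cache_colour_ok(unsigned long addr, unsigned long pgoff)
	{
		return ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) == 0;
	}

As the code above shows, do_color_align is set only for file-backed or MAP_SHARED requests, since only those can have other aliases that must agree on colour.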
131/* Try to align mapping such that we align it as much as possible. */ 316/* Try to align mapping such that we align it as much as possible. */
132unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags) 317unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
133{ 318{
@@ -171,15 +356,57 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
171 return addr; 356 return addr;
172} 357}
173 358
359/* Essentially the same as PowerPC... */
360void arch_pick_mmap_layout(struct mm_struct *mm)
361{
362 unsigned long random_factor = 0UL;
363
364 if (current->flags & PF_RANDOMIZE) {
365 random_factor = get_random_int();
366 if (test_thread_flag(TIF_32BIT))
367 random_factor &= ((1 * 1024 * 1024) - 1);
368 else
369 random_factor = ((random_factor << PAGE_SHIFT) &
370 0xffffffffUL);
371 }
372
373 /*
374 * Fall back to the standard layout if the personality
375 * bit is set, or if the expected stack growth is unlimited:
376 */
377 if (!test_thread_flag(TIF_32BIT) ||
378 (current->personality & ADDR_COMPAT_LAYOUT) ||
379 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
380 sysctl_legacy_va_layout) {
381 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
382 mm->get_unmapped_area = arch_get_unmapped_area;
383 mm->unmap_area = arch_unmap_area;
384 } else {
385 /* We know it's 32-bit */
386 unsigned long task_size = STACK_TOP32;
387 unsigned long gap;
388
389 gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
390 if (gap < 128 * 1024 * 1024)
391 gap = 128 * 1024 * 1024;
392 if (gap > (task_size / 6 * 5))
393 gap = (task_size / 6 * 5);
394
395 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
396 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
397 mm->unmap_area = arch_unmap_area_topdown;
398 }
399}
400
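For the top-down layout selected in the else branch, mmap_base ends up just below the 32-bit stack region: the stack rlimit is clamped to the range [128 MB, 5/6 of task_size] and subtracted, together with the randomisation, from STACK_TOP32. A stand-alone sketch of that arithmetic (illustrative values; PAGE_SHIFT of 13 assumes sparc64's 8K base pages, and STACK_TOP32 is 0xf0000000 as the hunks above show):

	#include <stdio.h>

	#define PAGE_SHIFT	13
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
	#define STACK_TOP32	0xf0000000UL

	static unsigned long topdown_mmap_base(unsigned long stack_rlim,
					       unsigned long random_factor)
	{
		unsigned long task_size = STACK_TOP32;
		unsigned long gap = stack_rlim;

		if (gap < 128 * 1024 * 1024)
			gap = 128 * 1024 * 1024;
		if (gap > (task_size / 6 * 5))
			gap = (task_size / 6 * 5);

		return PAGE_ALIGN(task_size - gap - random_factor);
	}

	int main(void)
	{
		/* An 8 MB stack rlimit is clamped up to the 128 MB minimum gap,
		 * so with no randomisation the base lands at 0xe8000000.
		 */
		printf("%#lx\n", topdown_mmap_base(8 * 1024 * 1024, 0));
		return 0;
	}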
174asmlinkage unsigned long sparc_brk(unsigned long brk) 401asmlinkage unsigned long sparc_brk(unsigned long brk)
175{ 402{
176 /* People could try to be nasty and use ta 0x6d in 32bit programs */ 403 /* People could try to be nasty and use ta 0x6d in 32bit programs */
177 if (test_thread_flag(TIF_32BIT) && 404 if (test_thread_flag(TIF_32BIT) && brk >= STACK_TOP32)
178 brk >= 0xf0000000UL)
179 return current->mm->brk; 405 return current->mm->brk;
180 406
181 if ((current->mm->brk & PAGE_OFFSET) != (brk & PAGE_OFFSET)) 407 if (unlikely(straddles_64bit_va_hole(current->mm->brk, brk)))
182 return current->mm->brk; 408 return current->mm->brk;
409
183 return sys_brk(brk); 410 return sys_brk(brk);
184} 411}
185 412
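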
@@ -340,13 +567,16 @@ asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
340 retval = -EINVAL; 567 retval = -EINVAL;
341 568
342 if (test_thread_flag(TIF_32BIT)) { 569 if (test_thread_flag(TIF_32BIT)) {
343 if (len > 0xf0000000UL || 570 if (len >= STACK_TOP32)
344 ((flags & MAP_FIXED) && addr > 0xf0000000UL - len)) 571 goto out_putf;
572
573 if ((flags & MAP_FIXED) && addr > STACK_TOP32 - len)
345 goto out_putf; 574 goto out_putf;
346 } else { 575 } else {
347 if (len > -PAGE_OFFSET || 576 if (len >= VA_EXCLUDE_START)
348 ((flags & MAP_FIXED) && 577 goto out_putf;
349 addr < PAGE_OFFSET && addr + len > -PAGE_OFFSET)) 578
579 if ((flags & MAP_FIXED) && invalid_64bit_range(addr, len))
350 goto out_putf; 580 goto out_putf;
351 } 581 }
352 582
@@ -365,9 +595,9 @@ asmlinkage long sys64_munmap(unsigned long addr, size_t len)
365{ 595{
366 long ret; 596 long ret;
367 597
368 if (len > -PAGE_OFFSET || 598 if (invalid_64bit_range(addr, len))
369 (addr < PAGE_OFFSET && addr + len > -PAGE_OFFSET))
370 return -EINVAL; 599 return -EINVAL;
600
371 down_write(&current->mm->mmap_sem); 601 down_write(&current->mm->mmap_sem);
372 ret = do_munmap(current->mm, addr, len); 602 ret = do_munmap(current->mm, addr, len);
373 up_write(&current->mm->mmap_sem); 603 up_write(&current->mm->mmap_sem);
@@ -384,18 +614,19 @@ asmlinkage unsigned long sys64_mremap(unsigned long addr,
384{ 614{
385 struct vm_area_struct *vma; 615 struct vm_area_struct *vma;
386 unsigned long ret = -EINVAL; 616 unsigned long ret = -EINVAL;
617
387 if (test_thread_flag(TIF_32BIT)) 618 if (test_thread_flag(TIF_32BIT))
388 goto out; 619 goto out;
389 if (old_len > -PAGE_OFFSET || new_len > -PAGE_OFFSET) 620 if (unlikely(new_len >= VA_EXCLUDE_START))
390 goto out; 621 goto out;
391 if (addr < PAGE_OFFSET && addr + old_len > -PAGE_OFFSET) 622 if (unlikely(invalid_64bit_range(addr, old_len)))
392 goto out; 623 goto out;
624
393 down_write(&current->mm->mmap_sem); 625 down_write(&current->mm->mmap_sem);
394 if (flags & MREMAP_FIXED) { 626 if (flags & MREMAP_FIXED) {
395 if (new_addr < PAGE_OFFSET && 627 if (invalid_64bit_range(new_addr, new_len))
396 new_addr + new_len > -PAGE_OFFSET)
397 goto out_sem; 628 goto out_sem;
398 } else if (addr < PAGE_OFFSET && addr + new_len > -PAGE_OFFSET) { 629 } else if (invalid_64bit_range(addr, new_len)) {
399 unsigned long map_flags = 0; 630 unsigned long map_flags = 0;
400 struct file *file = NULL; 631 struct file *file = NULL;
401 632
@@ -554,12 +785,10 @@ asmlinkage long sys_utrap_install(utrap_entry_t type,
554 } 785 }
555 if (!current_thread_info()->utraps) { 786 if (!current_thread_info()->utraps) {
556 current_thread_info()->utraps = 787 current_thread_info()->utraps =
557 kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL); 788 kzalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
558 if (!current_thread_info()->utraps) 789 if (!current_thread_info()->utraps)
559 return -ENOMEM; 790 return -ENOMEM;
560 current_thread_info()->utraps[0] = 1; 791 current_thread_info()->utraps[0] = 1;
561 memset(current_thread_info()->utraps+1, 0,
562 UT_TRAP_INSTRUCTION_31*sizeof(long));
563 } else { 792 } else {
564 if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p && 793 if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
565 current_thread_info()->utraps[0] > 1) { 794 current_thread_info()->utraps[0] > 1) {
diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c
index 417727bd87ba..0e41df024489 100644
--- a/arch/sparc64/kernel/sys_sparc32.c
+++ b/arch/sparc64/kernel/sys_sparc32.c
@@ -62,6 +62,7 @@
62#include <asm/fpumacro.h> 62#include <asm/fpumacro.h>
63#include <asm/semaphore.h> 63#include <asm/semaphore.h>
64#include <asm/mmu_context.h> 64#include <asm/mmu_context.h>
65#include <asm/a.out.h>
65 66
66asmlinkage long sys32_chown16(const char __user * filename, u16 user, u16 group) 67asmlinkage long sys32_chown16(const char __user * filename, u16 user, u16 group)
67{ 68{
@@ -1039,15 +1040,15 @@ asmlinkage unsigned long sys32_mremap(unsigned long addr,
1039 unsigned long ret = -EINVAL; 1040 unsigned long ret = -EINVAL;
1040 unsigned long new_addr = __new_addr; 1041 unsigned long new_addr = __new_addr;
1041 1042
1042 if (old_len > 0xf0000000UL || new_len > 0xf0000000UL) 1043 if (old_len > STACK_TOP32 || new_len > STACK_TOP32)
1043 goto out; 1044 goto out;
1044 if (addr > 0xf0000000UL - old_len) 1045 if (addr > STACK_TOP32 - old_len)
1045 goto out; 1046 goto out;
1046 down_write(&current->mm->mmap_sem); 1047 down_write(&current->mm->mmap_sem);
1047 if (flags & MREMAP_FIXED) { 1048 if (flags & MREMAP_FIXED) {
1048 if (new_addr > 0xf0000000UL - new_len) 1049 if (new_addr > STACK_TOP32 - new_len)
1049 goto out_sem; 1050 goto out_sem;
1050 } else if (addr > 0xf0000000UL - new_len) { 1051 } else if (addr > STACK_TOP32 - new_len) {
1051 unsigned long map_flags = 0; 1052 unsigned long map_flags = 0;
1052 struct file *file = NULL; 1053 struct file *file = NULL;
1053 1054
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index a22930d62adf..7d61f1bfd3d3 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -30,6 +30,8 @@
30#include <linux/cpufreq.h> 30#include <linux/cpufreq.h>
31#include <linux/percpu.h> 31#include <linux/percpu.h>
32#include <linux/profile.h> 32#include <linux/profile.h>
33#include <linux/miscdevice.h>
34#include <linux/rtc.h>
33 35
34#include <asm/oplib.h> 36#include <asm/oplib.h>
35#include <asm/mostek.h> 37#include <asm/mostek.h>
@@ -45,6 +47,7 @@
45#include <asm/smp.h> 47#include <asm/smp.h>
46#include <asm/sections.h> 48#include <asm/sections.h>
47#include <asm/cpudata.h> 49#include <asm/cpudata.h>
50#include <asm/uaccess.h>
48 51
49DEFINE_SPINLOCK(mostek_lock); 52DEFINE_SPINLOCK(mostek_lock);
50DEFINE_SPINLOCK(rtc_lock); 53DEFINE_SPINLOCK(rtc_lock);
@@ -193,16 +196,22 @@ struct sparc64_tick_ops *tick_ops __read_mostly = &tick_operations;
193 196
194static void stick_init_tick(unsigned long offset) 197static void stick_init_tick(unsigned long offset)
195{ 198{
196 tick_disable_protection(); 199 /* Writes to the %tick and %stick register are not
197 200 * allowed on sun4v. The Hypervisor controls that
198 /* Let the user get at STICK too. */ 201 * bit, per-strand.
199 __asm__ __volatile__( 202 */
200 " rd %%asr24, %%g2\n" 203 if (tlb_type != hypervisor) {
201 " andn %%g2, %0, %%g2\n" 204 tick_disable_protection();
202 " wr %%g2, 0, %%asr24" 205
203 : /* no outputs */ 206 /* Let the user get at STICK too. */
204 : "r" (TICK_PRIV_BIT) 207 __asm__ __volatile__(
205 : "g1", "g2"); 208 " rd %%asr24, %%g2\n"
209 " andn %%g2, %0, %%g2\n"
210 " wr %%g2, 0, %%asr24"
211 : /* no outputs */
212 : "r" (TICK_PRIV_BIT)
213 : "g1", "g2");
214 }
206 215
207 __asm__ __volatile__( 216 __asm__ __volatile__(
208 " rd %%asr24, %%g1\n" 217 " rd %%asr24, %%g1\n"
@@ -683,6 +692,83 @@ static void __init set_system_time(void)
683 } 692 }
684} 693}
685 694
695/* davem suggests we keep this within the 4M locked kernel image */
696static u32 starfire_get_time(void)
697{
698 static char obp_gettod[32];
699 static u32 unix_tod;
700
701 sprintf(obp_gettod, "h# %08x unix-gettod",
702 (unsigned int) (long) &unix_tod);
703 prom_feval(obp_gettod);
704
705 return unix_tod;
706}
707
708static int starfire_set_time(u32 val)
709{
710 /* Do nothing, time is set using the service processor
711 * console on this platform.
712 */
713 return 0;
714}
715
716static u32 hypervisor_get_time(void)
717{
718 register unsigned long func asm("%o5");
719 register unsigned long arg0 asm("%o0");
720 register unsigned long arg1 asm("%o1");
721 int retries = 10000;
722
723retry:
724 func = HV_FAST_TOD_GET;
725 arg0 = 0;
726 arg1 = 0;
727 __asm__ __volatile__("ta %6"
728 : "=&r" (func), "=&r" (arg0), "=&r" (arg1)
729 : "0" (func), "1" (arg0), "2" (arg1),
730 "i" (HV_FAST_TRAP));
731 if (arg0 == HV_EOK)
732 return arg1;
733 if (arg0 == HV_EWOULDBLOCK) {
734 if (--retries > 0) {
735 udelay(100);
736 goto retry;
737 }
738 printk(KERN_WARNING "SUN4V: tod_get() timed out.\n");
739 return 0;
740 }
741 printk(KERN_WARNING "SUN4V: tod_get() not supported.\n");
742 return 0;
743}
744
745static int hypervisor_set_time(u32 secs)
746{
747 register unsigned long func asm("%o5");
748 register unsigned long arg0 asm("%o0");
749 int retries = 10000;
750
751retry:
752 func = HV_FAST_TOD_SET;
753 arg0 = secs;
754 __asm__ __volatile__("ta %4"
755 : "=&r" (func), "=&r" (arg0)
756 : "0" (func), "1" (arg0),
757 "i" (HV_FAST_TRAP));
758 if (arg0 == HV_EOK)
759 return 0;
760 if (arg0 == HV_EWOULDBLOCK) {
761 if (--retries > 0) {
762 udelay(100);
763 goto retry;
764 }
765 printk(KERN_WARNING "SUN4V: tod_set() timed out.\n");
766 return -EAGAIN;
767 }
768 printk(KERN_WARNING "SUN4V: tod_set() not supported.\n");
769 return -EOPNOTSUPP;
770}
771
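Both TOD helpers above use the sun4v fast-trap convention: the hypervisor function number goes in %o5, arguments in %o0/%o1, "ta HV_FAST_TRAP" enters the hypervisor, and the status comes back in %o0 (HV_EOK, HV_EWOULDBLOCK, ...) with the return value, if any, in %o1. Condensed into a single call (sun4v_tod_get_once() is a hypothetical name used only for illustration):

	static unsigned long sun4v_tod_get_once(unsigned long *secs)
	{
		register unsigned long func asm("%o5") = HV_FAST_TOD_GET;
		register unsigned long arg0 asm("%o0") = 0;
		register unsigned long arg1 asm("%o1") = 0;

		__asm__ __volatile__("ta %6"
				     : "=&r" (func), "=&r" (arg0), "=&r" (arg1)
				     : "0" (func), "1" (arg0), "2" (arg1),
				       "i" (HV_FAST_TRAP));
		*secs = arg1;	/* meaningful only when the status is HV_EOK */
		return arg0;	/* HV_EOK, HV_EWOULDBLOCK, or an error code */
	}

Callers then retry on HV_EWOULDBLOCK with a bounded udelay() loop, exactly as hypervisor_get_time() and hypervisor_set_time() do above.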
686void __init clock_probe(void) 772void __init clock_probe(void)
687{ 773{
688 struct linux_prom_registers clk_reg[2]; 774 struct linux_prom_registers clk_reg[2];
@@ -702,14 +788,14 @@ void __init clock_probe(void)
702 788
703 789
704 if (this_is_starfire) { 790 if (this_is_starfire) {
705 /* davem suggests we keep this within the 4M locked kernel image */ 791 xtime.tv_sec = starfire_get_time();
706 static char obp_gettod[256]; 792 xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
707 static u32 unix_tod; 793 set_normalized_timespec(&wall_to_monotonic,
708 794 -xtime.tv_sec, -xtime.tv_nsec);
709 sprintf(obp_gettod, "h# %08x unix-gettod", 795 return;
710 (unsigned int) (long) &unix_tod); 796 }
711 prom_feval(obp_gettod); 797 if (tlb_type == hypervisor) {
712 xtime.tv_sec = unix_tod; 798 xtime.tv_sec = hypervisor_get_time();
713 xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ); 799 xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
714 set_normalized_timespec(&wall_to_monotonic, 800 set_normalized_timespec(&wall_to_monotonic,
715 -xtime.tv_sec, -xtime.tv_nsec); 801 -xtime.tv_sec, -xtime.tv_nsec);
@@ -981,11 +1067,10 @@ static void sparc64_start_timers(irqreturn_t (*cfunc)(int, void *, struct pt_reg
981} 1067}
982 1068
983struct freq_table { 1069struct freq_table {
984 unsigned long udelay_val_ref;
985 unsigned long clock_tick_ref; 1070 unsigned long clock_tick_ref;
986 unsigned int ref_freq; 1071 unsigned int ref_freq;
987}; 1072};
988static DEFINE_PER_CPU(struct freq_table, sparc64_freq_table) = { 0, 0, 0 }; 1073static DEFINE_PER_CPU(struct freq_table, sparc64_freq_table) = { 0, 0 };
989 1074
990unsigned long sparc64_get_clock_tick(unsigned int cpu) 1075unsigned long sparc64_get_clock_tick(unsigned int cpu)
991{ 1076{
@@ -1007,16 +1092,11 @@ static int sparc64_cpufreq_notifier(struct notifier_block *nb, unsigned long val
1007 1092
1008 if (!ft->ref_freq) { 1093 if (!ft->ref_freq) {
1009 ft->ref_freq = freq->old; 1094 ft->ref_freq = freq->old;
1010 ft->udelay_val_ref = cpu_data(cpu).udelay_val;
1011 ft->clock_tick_ref = cpu_data(cpu).clock_tick; 1095 ft->clock_tick_ref = cpu_data(cpu).clock_tick;
1012 } 1096 }
1013 if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) || 1097 if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
1014 (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) || 1098 (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
1015 (val == CPUFREQ_RESUMECHANGE)) { 1099 (val == CPUFREQ_RESUMECHANGE)) {
1016 cpu_data(cpu).udelay_val =
1017 cpufreq_scale(ft->udelay_val_ref,
1018 ft->ref_freq,
1019 freq->new);
1020 cpu_data(cpu).clock_tick = 1100 cpu_data(cpu).clock_tick =
1021 cpufreq_scale(ft->clock_tick_ref, 1101 cpufreq_scale(ft->clock_tick_ref,
1022 ft->ref_freq, 1102 ft->ref_freq,
@@ -1179,3 +1259,246 @@ static int set_rtc_mmss(unsigned long nowtime)
1179 return retval; 1259 return retval;
1180 } 1260 }
1181} 1261}
1262
1263#define RTC_IS_OPEN 0x01 /* means /dev/rtc is in use */
1264static unsigned char mini_rtc_status; /* bitmapped status byte. */
1265
1266/* months start at 0 now */
1267static unsigned char days_in_mo[] =
1268{31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
1269
1270#define FEBRUARY 2
1271#define STARTOFTIME 1970
1272#define SECDAY 86400L
1273#define SECYR (SECDAY * 365)
1274#define leapyear(year) ((year) % 4 == 0 && \
1275 ((year) % 100 != 0 || (year) % 400 == 0))
1276#define days_in_year(a) (leapyear(a) ? 366 : 365)
1277#define days_in_month(a) (month_days[(a) - 1])
1278
1279static int month_days[12] = {
1280 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
1281};
1282
1283/*
1284 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
1285 */
1286static void GregorianDay(struct rtc_time * tm)
1287{
1288 int leapsToDate;
1289 int lastYear;
1290 int day;
1291 int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };
1292
1293 lastYear = tm->tm_year - 1;
1294
1295 /*
1296 * Number of leap corrections to apply up to end of last year
1297 */
1298 leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;
1299
1300 /*
1301 * This year is a leap year if it is divisible by 4 except when it is
1302 * divisible by 100 unless it is divisible by 400
1303 *
1304 * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was
1305 */
1306 day = tm->tm_mon > 2 && leapyear(tm->tm_year);
1307
1308 day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
1309 tm->tm_mday;
1310
1311 tm->tm_wday = day % 7;
1312}
1313
1314static void to_tm(int tim, struct rtc_time *tm)
1315{
1316 register int i;
1317 register long hms, day;
1318
1319 day = tim / SECDAY;
1320 hms = tim % SECDAY;
1321
1322 /* Hours, minutes, seconds are easy */
1323 tm->tm_hour = hms / 3600;
1324 tm->tm_min = (hms % 3600) / 60;
1325 tm->tm_sec = (hms % 3600) % 60;
1326
1327 /* Number of years in days */
1328 for (i = STARTOFTIME; day >= days_in_year(i); i++)
1329 day -= days_in_year(i);
1330 tm->tm_year = i;
1331
1332 /* Number of months in days left */
1333 if (leapyear(tm->tm_year))
1334 days_in_month(FEBRUARY) = 29;
1335 for (i = 1; day >= days_in_month(i); i++)
1336 day -= days_in_month(i);
1337 days_in_month(FEBRUARY) = 28;
1338 tm->tm_mon = i;
1339
1340 /* Days are what is left over (+1) from all that. */
1341 tm->tm_mday = day + 1;
1342
1343 /*
1344 * Determine the day of week
1345 */
1346 GregorianDay(tm);
1347}
1348
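As a quick check of the conversion above: to_tm(0, &tm) describes 00:00:00 on 1 January 1970, which was a Thursday, so with the functions from this hunk the fields come out as below; mini_get_rtc_time() then shifts the result into the usual struct rtc_time convention by subtracting 1900 from the year and 1 from the month.

	struct rtc_time tm;

	to_tm(0, &tm);
	/* tm.tm_year == 1970, tm.tm_mon == 1, tm.tm_mday == 1,
	 * tm.tm_hour == tm.tm_min == tm.tm_sec == 0,
	 * tm.tm_wday == 4 (Sunday is 0, so 4 is Thursday)
	 */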
1349/* Both Starfire and SUN4V give us seconds since Jan 1st, 1970,
1350 * aka Unix time. So we have to convert to/from rtc_time.
1351 */
1352static inline void mini_get_rtc_time(struct rtc_time *time)
1353{
1354 unsigned long flags;
1355 u32 seconds;
1356
1357 spin_lock_irqsave(&rtc_lock, flags);
1358 seconds = 0;
1359 if (this_is_starfire)
1360 seconds = starfire_get_time();
1361 else if (tlb_type == hypervisor)
1362 seconds = hypervisor_get_time();
1363 spin_unlock_irqrestore(&rtc_lock, flags);
1364
1365 to_tm(seconds, time);
1366 time->tm_year -= 1900;
1367 time->tm_mon -= 1;
1368}
1369
1370static inline int mini_set_rtc_time(struct rtc_time *time)
1371{
1372 u32 seconds = mktime(time->tm_year + 1900, time->tm_mon + 1,
1373 time->tm_mday, time->tm_hour,
1374 time->tm_min, time->tm_sec);
1375 unsigned long flags;
1376 int err;
1377
1378 spin_lock_irqsave(&rtc_lock, flags);
1379 err = -ENODEV;
1380 if (this_is_starfire)
1381 err = starfire_set_time(seconds);
1382 else if (tlb_type == hypervisor)
1383 err = hypervisor_set_time(seconds);
1384 spin_unlock_irqrestore(&rtc_lock, flags);
1385
1386 return err;
1387}
1388
1389static int mini_rtc_ioctl(struct inode *inode, struct file *file,
1390 unsigned int cmd, unsigned long arg)
1391{
1392 struct rtc_time wtime;
1393 void __user *argp = (void __user *)arg;
1394
1395 switch (cmd) {
1396
1397 case RTC_PLL_GET:
1398 return -EINVAL;
1399
1400 case RTC_PLL_SET:
1401 return -EINVAL;
1402
1403 case RTC_UIE_OFF: /* disable ints from RTC updates. */
1404 return 0;
1405
1406 case RTC_UIE_ON: /* enable ints for RTC updates. */
1407 return -EINVAL;
1408
1409 case RTC_RD_TIME: /* Read the time/date from RTC */
1410 /* this doesn't get week-day, who cares */
1411 memset(&wtime, 0, sizeof(wtime));
1412 mini_get_rtc_time(&wtime);
1413
1414 return copy_to_user(argp, &wtime, sizeof(wtime)) ? -EFAULT : 0;
1415
1416 case RTC_SET_TIME: /* Set the RTC */
1417 {
1418 int year;
1419 unsigned char leap_yr;
1420
1421 if (!capable(CAP_SYS_TIME))
1422 return -EACCES;
1423
1424 if (copy_from_user(&wtime, argp, sizeof(wtime)))
1425 return -EFAULT;
1426
1427 year = wtime.tm_year + 1900;
1428 leap_yr = ((!(year % 4) && (year % 100)) ||
1429 !(year % 400));
1430
1431 if ((wtime.tm_mon < 0 || wtime.tm_mon > 11) || (wtime.tm_mday < 1))
1432 return -EINVAL;
1433
1434 if (wtime.tm_mday < 0 || wtime.tm_mday >
1435 (days_in_mo[wtime.tm_mon] + ((wtime.tm_mon == 1) && leap_yr)))
1436 return -EINVAL;
1437
1438 if (wtime.tm_hour < 0 || wtime.tm_hour >= 24 ||
1439 wtime.tm_min < 0 || wtime.tm_min >= 60 ||
1440 wtime.tm_sec < 0 || wtime.tm_sec >= 60)
1441 return -EINVAL;
1442
1443 return mini_set_rtc_time(&wtime);
1444 }
1445 }
1446
1447 return -EINVAL;
1448}
1449
1450static int mini_rtc_open(struct inode *inode, struct file *file)
1451{
1452 if (mini_rtc_status & RTC_IS_OPEN)
1453 return -EBUSY;
1454
1455 mini_rtc_status |= RTC_IS_OPEN;
1456
1457 return 0;
1458}
1459
1460static int mini_rtc_release(struct inode *inode, struct file *file)
1461{
1462 mini_rtc_status &= ~RTC_IS_OPEN;
1463 return 0;
1464}
1465
1466
1467static struct file_operations mini_rtc_fops = {
1468 .owner = THIS_MODULE,
1469 .ioctl = mini_rtc_ioctl,
1470 .open = mini_rtc_open,
1471 .release = mini_rtc_release,
1472};
1473
1474static struct miscdevice rtc_mini_dev =
1475{
1476 .minor = RTC_MINOR,
1477 .name = "rtc",
1478 .fops = &mini_rtc_fops,
1479};
1480
1481static int __init rtc_mini_init(void)
1482{
1483 int retval;
1484
1485 if (tlb_type != hypervisor && !this_is_starfire)
1486 return -ENODEV;
1487
1488 printk(KERN_INFO "Mini RTC Driver\n");
1489
1490 retval = misc_register(&rtc_mini_dev);
1491 if (retval < 0)
1492 return retval;
1493
1494 return 0;
1495}
1496
1497static void __exit rtc_mini_exit(void)
1498{
1499 misc_deregister(&rtc_mini_dev);
1500}
1501
1502
1503module_init(rtc_mini_init);
1504module_exit(rtc_mini_exit);
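Once the misc device is registered, the standard RTC ioctls from <linux/rtc.h> work against /dev/rtc on these machines; per mini_rtc_ioctl() above, only RTC_RD_TIME, RTC_SET_TIME and RTC_UIE_OFF succeed. A minimal user-space read, as a sketch:

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/rtc.h>

	int main(void)
	{
		struct rtc_time tm;
		int fd = open("/dev/rtc", O_RDONLY);

		if (fd < 0) {
			perror("open /dev/rtc");
			return 1;
		}
		if (ioctl(fd, RTC_RD_TIME, &tm) < 0) {
			perror("RTC_RD_TIME");
			close(fd);
			return 1;
		}
		/* tm_year counts from 1900 and tm_mon from 0, as set by
		 * mini_get_rtc_time() above.
		 */
		printf("%04d-%02d-%02d %02d:%02d:%02d\n",
		       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
		       tm.tm_hour, tm.tm_min, tm.tm_sec);
		close(fd);
		return 0;
	}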
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S
index 9478551cb020..a4dc01a3d238 100644
--- a/arch/sparc64/kernel/trampoline.S
+++ b/arch/sparc64/kernel/trampoline.S
@@ -16,6 +16,8 @@
16#include <asm/processor.h> 16#include <asm/processor.h>
17#include <asm/thread_info.h> 17#include <asm/thread_info.h>
18#include <asm/mmu.h> 18#include <asm/mmu.h>
19#include <asm/hypervisor.h>
20#include <asm/cpudata.h>
19 21
20 .data 22 .data
21 .align 8 23 .align 8
@@ -28,14 +30,19 @@ itlb_load:
28dtlb_load: 30dtlb_load:
29 .asciz "SUNW,dtlb-load" 31 .asciz "SUNW,dtlb-load"
30 32
33 /* XXX __cpuinit this thing XXX */
34#define TRAMP_STACK_SIZE 1024
35 .align 16
36tramp_stack:
37 .skip TRAMP_STACK_SIZE
38
31 .text 39 .text
32 .align 8 40 .align 8
33 .globl sparc64_cpu_startup, sparc64_cpu_startup_end 41 .globl sparc64_cpu_startup, sparc64_cpu_startup_end
34sparc64_cpu_startup: 42sparc64_cpu_startup:
35 flushw 43 BRANCH_IF_SUN4V(g1, niagara_startup)
36 44 BRANCH_IF_CHEETAH_BASE(g1, g5, cheetah_startup)
37 BRANCH_IF_CHEETAH_BASE(g1,g5,cheetah_startup) 45 BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1, g5, cheetah_plus_startup)
38 BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g5,cheetah_plus_startup)
39 46
40 ba,pt %xcc, spitfire_startup 47 ba,pt %xcc, spitfire_startup
41 nop 48 nop
@@ -55,6 +62,7 @@ cheetah_startup:
55 or %g5, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g5 62 or %g5, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g5
56 stxa %g5, [%g0] ASI_DCU_CONTROL_REG 63 stxa %g5, [%g0] ASI_DCU_CONTROL_REG
57 membar #Sync 64 membar #Sync
65 /* fallthru */
58 66
59cheetah_generic_startup: 67cheetah_generic_startup:
60 mov TSB_EXTENSION_P, %g3 68 mov TSB_EXTENSION_P, %g3
@@ -70,7 +78,9 @@ cheetah_generic_startup:
70 stxa %g0, [%g3] ASI_DMMU 78 stxa %g0, [%g3] ASI_DMMU
71 stxa %g0, [%g3] ASI_IMMU 79 stxa %g0, [%g3] ASI_IMMU
72 membar #Sync 80 membar #Sync
81 /* fallthru */
73 82
83niagara_startup:
74 /* Disable STICK_INT interrupts. */ 84 /* Disable STICK_INT interrupts. */
75 sethi %hi(0x80000000), %g5 85 sethi %hi(0x80000000), %g5
76 sllx %g5, 32, %g5 86 sllx %g5, 32, %g5
@@ -85,17 +95,17 @@ spitfire_startup:
85 membar #Sync 95 membar #Sync
86 96
87startup_continue: 97startup_continue:
88 wrpr %g0, 15, %pil
89
90 sethi %hi(0x80000000), %g2 98 sethi %hi(0x80000000), %g2
91 sllx %g2, 32, %g2 99 sllx %g2, 32, %g2
92 wr %g2, 0, %tick_cmpr 100 wr %g2, 0, %tick_cmpr
93 101
102 mov %o0, %l0
103
104 BRANCH_IF_SUN4V(g1, niagara_lock_tlb)
105
94 /* Call OBP by hand to lock KERNBASE into i/d tlbs. 106 /* Call OBP by hand to lock KERNBASE into i/d tlbs.
95 * We lock 2 consecutive entries if we are 'bigkernel'. 107 * We lock 2 consecutive entries if we are 'bigkernel'.
96 */ 108 */
97 mov %o0, %l0
98
99 sethi %hi(prom_entry_lock), %g2 109 sethi %hi(prom_entry_lock), %g2
1001: ldstub [%g2 + %lo(prom_entry_lock)], %g1 1101: ldstub [%g2 + %lo(prom_entry_lock)], %g1
101 membar #StoreLoad | #StoreStore 111 membar #StoreLoad | #StoreStore
@@ -105,7 +115,6 @@ startup_continue:
105 sethi %hi(p1275buf), %g2 115 sethi %hi(p1275buf), %g2
106 or %g2, %lo(p1275buf), %g2 116 or %g2, %lo(p1275buf), %g2
107 ldx [%g2 + 0x10], %l2 117 ldx [%g2 + 0x10], %l2
108 mov %sp, %l1
109 add %l2, -(192 + 128), %sp 118 add %l2, -(192 + 128), %sp
110 flushw 119 flushw
111 120
@@ -142,8 +151,7 @@ startup_continue:
142 151
143 sethi %hi(bigkernel), %g2 152 sethi %hi(bigkernel), %g2
144 lduw [%g2 + %lo(bigkernel)], %g2 153 lduw [%g2 + %lo(bigkernel)], %g2
145 cmp %g2, 0 154 brz,pt %g2, do_dtlb
146 be,pt %icc, do_dtlb
147 nop 155 nop
148 156
149 sethi %hi(call_method), %g2 157 sethi %hi(call_method), %g2
@@ -214,8 +222,7 @@ do_dtlb:
214 222
215 sethi %hi(bigkernel), %g2 223 sethi %hi(bigkernel), %g2
216 lduw [%g2 + %lo(bigkernel)], %g2 224 lduw [%g2 + %lo(bigkernel)], %g2
217 cmp %g2, 0 225 brz,pt %g2, do_unlock
218 be,pt %icc, do_unlock
219 nop 226 nop
220 227
221 sethi %hi(call_method), %g2 228 sethi %hi(call_method), %g2
@@ -257,99 +264,180 @@ do_unlock:
257 stb %g0, [%g2 + %lo(prom_entry_lock)] 264 stb %g0, [%g2 + %lo(prom_entry_lock)]
258 membar #StoreStore | #StoreLoad 265 membar #StoreStore | #StoreLoad
259 266
260 mov %l1, %sp 267 ba,pt %xcc, after_lock_tlb
261 flushw 268 nop
269
270niagara_lock_tlb:
271 mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
272 sethi %hi(KERNBASE), %o0
273 clr %o1
274 sethi %hi(kern_locked_tte_data), %o2
275 ldx [%o2 + %lo(kern_locked_tte_data)], %o2
276 mov HV_MMU_IMMU, %o3
277 ta HV_FAST_TRAP
278
279 mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
280 sethi %hi(KERNBASE), %o0
281 clr %o1
282 sethi %hi(kern_locked_tte_data), %o2
283 ldx [%o2 + %lo(kern_locked_tte_data)], %o2
284 mov HV_MMU_DMMU, %o3
285 ta HV_FAST_TRAP
262 286
263 mov %l0, %o0 287 sethi %hi(bigkernel), %g2
288 lduw [%g2 + %lo(bigkernel)], %g2
289 brz,pt %g2, after_lock_tlb
290 nop
264 291
292 mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
293 sethi %hi(KERNBASE + 0x400000), %o0
294 clr %o1
295 sethi %hi(kern_locked_tte_data), %o2
296 ldx [%o2 + %lo(kern_locked_tte_data)], %o2
297 sethi %hi(0x400000), %o3
298 add %o2, %o3, %o2
299 mov HV_MMU_IMMU, %o3
300 ta HV_FAST_TRAP
301
302 mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
303 sethi %hi(KERNBASE + 0x400000), %o0
304 clr %o1
305 sethi %hi(kern_locked_tte_data), %o2
306 ldx [%o2 + %lo(kern_locked_tte_data)], %o2
307 sethi %hi(0x400000), %o3
308 add %o2, %o3, %o2
309 mov HV_MMU_DMMU, %o3
310 ta HV_FAST_TRAP
311
312after_lock_tlb:
265 wrpr %g0, (PSTATE_PRIV | PSTATE_PEF), %pstate 313 wrpr %g0, (PSTATE_PRIV | PSTATE_PEF), %pstate
266 wr %g0, 0, %fprs 314 wr %g0, 0, %fprs
267 315
268 /* XXX Buggy PROM... */
269 srl %o0, 0, %o0
270 ldx [%o0], %g6
271
272 wr %g0, ASI_P, %asi 316 wr %g0, ASI_P, %asi
273 317
274 mov PRIMARY_CONTEXT, %g7 318 mov PRIMARY_CONTEXT, %g7
275 stxa %g0, [%g7] ASI_DMMU 319
320661: stxa %g0, [%g7] ASI_DMMU
321 .section .sun4v_1insn_patch, "ax"
322 .word 661b
323 stxa %g0, [%g7] ASI_MMU
324 .previous
325
276 membar #Sync 326 membar #Sync
277 mov SECONDARY_CONTEXT, %g7 327 mov SECONDARY_CONTEXT, %g7
278 stxa %g0, [%g7] ASI_DMMU 328
329661: stxa %g0, [%g7] ASI_DMMU
330 .section .sun4v_1insn_patch, "ax"
331 .word 661b
332 stxa %g0, [%g7] ASI_MMU
333 .previous
334
279 membar #Sync 335 membar #Sync
280 336
281 mov 1, %g5 337 /* Everything we do here, until we properly take over the
282 sllx %g5, THREAD_SHIFT, %g5 338 * trap table, must be done with extreme care. We cannot
283 sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5 339 * make any references to %g6 (current thread pointer),
284 add %g6, %g5, %sp 340 * %g4 (current task pointer), or %g5 (base of current cpu's
341 * per-cpu area) until we properly take over the trap table
342 * from the firmware and hypervisor.
343 *
344 * Get onto temporary stack which is in the locked kernel image.
345 */
346 sethi %hi(tramp_stack), %g1
347 or %g1, %lo(tramp_stack), %g1
348 add %g1, TRAMP_STACK_SIZE, %g1
349 sub %g1, STACKFRAME_SZ + STACK_BIAS, %sp
285 mov 0, %fp 350 mov 0, %fp
286 351
287 wrpr %g0, 0, %wstate 352 /* Put garbage in these registers to trap any access to them. */
288 wrpr %g0, 0, %tl 353 set 0xdeadbeef, %g4
354 set 0xdeadbeef, %g5
355 set 0xdeadbeef, %g6
289 356
290 /* Setup the trap globals, then we can resurface. */ 357 call init_irqwork_curcpu
291 rdpr %pstate, %o1 358 nop
292 mov %g6, %o2
293 wrpr %o1, PSTATE_AG, %pstate
294 sethi %hi(sparc64_ttable_tl0), %g5
295 wrpr %g5, %tba
296 mov %o2, %g6
297
298 wrpr %o1, PSTATE_MG, %pstate
299#define KERN_HIGHBITS ((_PAGE_VALID|_PAGE_SZ4MB)^0xfffff80000000000)
300#define KERN_LOWBITS (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)
301
302 mov TSB_REG, %g1
303 stxa %g0, [%g1] ASI_DMMU
304 membar #Sync
305 mov TLB_SFSR, %g1
306 sethi %uhi(KERN_HIGHBITS), %g2
307 or %g2, %ulo(KERN_HIGHBITS), %g2
308 sllx %g2, 32, %g2
309 or %g2, KERN_LOWBITS, %g2
310 359
311 BRANCH_IF_ANY_CHEETAH(g3,g7,9f) 360 sethi %hi(tlb_type), %g3
361 lduw [%g3 + %lo(tlb_type)], %g2
362 cmp %g2, 3
363 bne,pt %icc, 1f
364 nop
312 365
313 ba,pt %xcc, 1f 366 call hard_smp_processor_id
314 nop 367 nop
368
369 mov %o0, %o1
370 mov 0, %o0
371 mov 0, %o2
372 call sun4v_init_mondo_queues
373 mov 1, %o3
315 374
3169: 3751: call init_cur_cpu_trap
317 sethi %uhi(VPTE_BASE_CHEETAH), %g3 376 ldx [%l0], %o0
318 or %g3, %ulo(VPTE_BASE_CHEETAH), %g3 377
319 ba,pt %xcc, 2f 378 /* Start using proper page size encodings in ctx register. */
320 sllx %g3, 32, %g3 379 sethi %hi(sparc64_kern_pri_context), %g3
3211: 380 ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2
322 sethi %uhi(VPTE_BASE_SPITFIRE), %g3 381 mov PRIMARY_CONTEXT, %g1
323 or %g3, %ulo(VPTE_BASE_SPITFIRE), %g3
324 sllx %g3, 32, %g3
325 382
3262: 383661: stxa %g2, [%g1] ASI_DMMU
327 clr %g7 384 .section .sun4v_1insn_patch, "ax"
328#undef KERN_HIGHBITS 385 .word 661b
329#undef KERN_LOWBITS 386 stxa %g2, [%g1] ASI_MMU
387 .previous
330 388
331 wrpr %o1, 0x0, %pstate 389 membar #Sync
332 ldx [%g6 + TI_TASK], %g4
333 390
334 wrpr %g0, 0, %wstate 391 wrpr %g0, 0, %wstate
335 392
336 call init_irqwork_curcpu 393 /* As a hack, put &init_thread_union into %g6.
394 * prom_world() loads from here to restore the %asi
395 * register.
396 */
397 sethi %hi(init_thread_union), %g6
398 or %g6, %lo(init_thread_union), %g6
399
400 sethi %hi(is_sun4v), %o0
401 lduw [%o0 + %lo(is_sun4v)], %o0
402 brz,pt %o0, 1f
337 nop 403 nop
338 404
339 /* Start using proper page size encodings in ctx register. */ 405 TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
340 sethi %hi(sparc64_kern_pri_context), %g3 406 add %g2, TRAP_PER_CPU_FAULT_INFO, %g2
341 ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2 407 stxa %g2, [%g0] ASI_SCRATCHPAD
342 mov PRIMARY_CONTEXT, %g1 408
343 stxa %g2, [%g1] ASI_DMMU 409 /* Compute physical address:
344 membar #Sync 410 *
411 * paddr = kern_base + (mmfsa_vaddr - KERNBASE)
412 */
413 sethi %hi(KERNBASE), %g3
414 sub %g2, %g3, %g2
415 sethi %hi(kern_base), %g3
416 ldx [%g3 + %lo(kern_base)], %g3
417 add %g2, %g3, %o1
418
419 call prom_set_trap_table_sun4v
420 sethi %hi(sparc64_ttable_tl0), %o0
421
422 ba,pt %xcc, 2f
423 nop
424
4251: call prom_set_trap_table
426 sethi %hi(sparc64_ttable_tl0), %o0
427
4282: ldx [%l0], %g6
429 ldx [%g6 + TI_TASK], %g4
430
431 mov 1, %g5
432 sllx %g5, THREAD_SHIFT, %g5
433 sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5
434 add %g6, %g5, %sp
435 mov 0, %fp
345 436
346 rdpr %pstate, %o1 437 rdpr %pstate, %o1
347 or %o1, PSTATE_IE, %o1 438 or %o1, PSTATE_IE, %o1
348 wrpr %o1, 0, %pstate 439 wrpr %o1, 0, %pstate
349 440
350 call prom_set_trap_table
351 sethi %hi(sparc64_ttable_tl0), %o0
352
353 call smp_callin 441 call smp_callin
354 nop 442 nop
355 call cpu_idle 443 call cpu_idle
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 8d44ae5a15e3..7f7dba0ca96a 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -38,6 +38,7 @@
38#include <asm/processor.h> 38#include <asm/processor.h>
39#include <asm/timer.h> 39#include <asm/timer.h>
40#include <asm/kdebug.h> 40#include <asm/kdebug.h>
41#include <asm/head.h>
41#ifdef CONFIG_KMOD 42#ifdef CONFIG_KMOD
42#include <linux/kmod.h> 43#include <linux/kmod.h>
43#endif 44#endif
@@ -72,12 +73,14 @@ struct tl1_traplog {
72 73
73static void dump_tl1_traplog(struct tl1_traplog *p) 74static void dump_tl1_traplog(struct tl1_traplog *p)
74{ 75{
75 int i; 76 int i, limit;
77
78 printk(KERN_EMERG "TRAPLOG: Error at trap level 0x%lx, "
79 "dumping track stack.\n", p->tl);
76 80
77 printk("TRAPLOG: Error at trap level 0x%lx, dumping track stack.\n", 81 limit = (tlb_type == hypervisor) ? 2 : 4;
78 p->tl); 82 for (i = 0; i < limit; i++) {
79 for (i = 0; i < 4; i++) { 83 printk(KERN_EMERG
80 printk(KERN_CRIT
81 "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] " 84 "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
82 "TNPC[%016lx] TT[%lx]\n", 85 "TNPC[%016lx] TT[%lx]\n",
83 i + 1, 86 i + 1,
@@ -179,6 +182,45 @@ void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr
179 spitfire_insn_access_exception(regs, sfsr, sfar); 182 spitfire_insn_access_exception(regs, sfsr, sfar);
180} 183}
181 184
185void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
186{
187 unsigned short type = (type_ctx >> 16);
188 unsigned short ctx = (type_ctx & 0xffff);
189 siginfo_t info;
190
191 if (notify_die(DIE_TRAP, "instruction access exception", regs,
192 0, 0x8, SIGTRAP) == NOTIFY_STOP)
193 return;
194
195 if (regs->tstate & TSTATE_PRIV) {
196 printk("sun4v_insn_access_exception: ADDR[%016lx] "
197 "CTX[%04x] TYPE[%04x], going.\n",
198 addr, ctx, type);
199 die_if_kernel("Iax", regs);
200 }
201
202 if (test_thread_flag(TIF_32BIT)) {
203 regs->tpc &= 0xffffffff;
204 regs->tnpc &= 0xffffffff;
205 }
206 info.si_signo = SIGSEGV;
207 info.si_errno = 0;
208 info.si_code = SEGV_MAPERR;
209 info.si_addr = (void __user *) addr;
210 info.si_trapno = 0;
211 force_sig_info(SIGSEGV, &info, current);
212}
213
214void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
215{
216 if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
217 0, 0x8, SIGTRAP) == NOTIFY_STOP)
218 return;
219
220 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
221 sun4v_insn_access_exception(regs, addr, type_ctx);
222}
223
182void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) 224void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
183{ 225{
184 siginfo_t info; 226 siginfo_t info;
@@ -227,6 +269,45 @@ void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr
227 spitfire_data_access_exception(regs, sfsr, sfar); 269 spitfire_data_access_exception(regs, sfsr, sfar);
228} 270}
229 271
272void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
273{
274 unsigned short type = (type_ctx >> 16);
275 unsigned short ctx = (type_ctx & 0xffff);
276 siginfo_t info;
277
278 if (notify_die(DIE_TRAP, "data access exception", regs,
279 0, 0x8, SIGTRAP) == NOTIFY_STOP)
280 return;
281
282 if (regs->tstate & TSTATE_PRIV) {
283 printk("sun4v_data_access_exception: ADDR[%016lx] "
284 "CTX[%04x] TYPE[%04x], going.\n",
285 addr, ctx, type);
286 die_if_kernel("Dax", regs);
287 }
288
289 if (test_thread_flag(TIF_32BIT)) {
290 regs->tpc &= 0xffffffff;
291 regs->tnpc &= 0xffffffff;
292 }
293 info.si_signo = SIGSEGV;
294 info.si_errno = 0;
295 info.si_code = SEGV_MAPERR;
296 info.si_addr = (void __user *) addr;
297 info.si_trapno = 0;
298 force_sig_info(SIGSEGV, &info, current);
299}
300
301void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
302{
303 if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
304 0, 0x8, SIGTRAP) == NOTIFY_STOP)
305 return;
306
307 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
308 sun4v_data_access_exception(regs, addr, type_ctx);
309}
310
230#ifdef CONFIG_PCI 311#ifdef CONFIG_PCI
231/* This is really pathetic... */ 312/* This is really pathetic... */
232extern volatile int pci_poke_in_progress; 313extern volatile int pci_poke_in_progress;
@@ -788,7 +869,8 @@ void __init cheetah_ecache_flush_init(void)
788 cheetah_error_log[i].afsr = CHAFSR_INVALID; 869 cheetah_error_log[i].afsr = CHAFSR_INVALID;
789 870
790 __asm__ ("rdpr %%ver, %0" : "=r" (ver)); 871 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
791 if ((ver >> 32) == 0x003e0016) { 872 if ((ver >> 32) == __JALAPENO_ID ||
873 (ver >> 32) == __SERRANO_ID) {
792 cheetah_error_table = &__jalapeno_error_table[0]; 874 cheetah_error_table = &__jalapeno_error_table[0];
793 cheetah_afsr_errors = JPAFSR_ERRORS; 875 cheetah_afsr_errors = JPAFSR_ERRORS;
794 } else if ((ver >> 32) == 0x003e0015) { 876 } else if ((ver >> 32) == 0x003e0015) {
@@ -1666,6 +1748,238 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
1666 regs->tpc); 1748 regs->tpc);
1667} 1749}
1668 1750
1751struct sun4v_error_entry {
1752 u64 err_handle;
1753 u64 err_stick;
1754
1755 u32 err_type;
1756#define SUN4V_ERR_TYPE_UNDEFINED 0
1757#define SUN4V_ERR_TYPE_UNCORRECTED_RES 1
1758#define SUN4V_ERR_TYPE_PRECISE_NONRES 2
1759#define SUN4V_ERR_TYPE_DEFERRED_NONRES 3
1760#define SUN4V_ERR_TYPE_WARNING_RES 4
1761
1762 u32 err_attrs;
1763#define SUN4V_ERR_ATTRS_PROCESSOR 0x00000001
1764#define SUN4V_ERR_ATTRS_MEMORY 0x00000002
1765#define SUN4V_ERR_ATTRS_PIO 0x00000004
1766#define SUN4V_ERR_ATTRS_INT_REGISTERS 0x00000008
1767#define SUN4V_ERR_ATTRS_FPU_REGISTERS 0x00000010
1768#define SUN4V_ERR_ATTRS_USER_MODE 0x01000000
1769#define SUN4V_ERR_ATTRS_PRIV_MODE 0x02000000
1770#define SUN4V_ERR_ATTRS_RES_QUEUE_FULL 0x80000000
1771
1772 u64 err_raddr;
1773 u32 err_size;
1774 u16 err_cpu;
1775 u16 err_pad;
1776};
1777
1778static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
1779static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
1780
1781static const char *sun4v_err_type_to_str(u32 type)
1782{
1783 switch (type) {
1784 case SUN4V_ERR_TYPE_UNDEFINED:
1785 return "undefined";
1786 case SUN4V_ERR_TYPE_UNCORRECTED_RES:
1787 return "uncorrected resumable";
1788 case SUN4V_ERR_TYPE_PRECISE_NONRES:
1789 return "precise nonresumable";
1790 case SUN4V_ERR_TYPE_DEFERRED_NONRES:
1791 return "deferred nonresumable";
1792 case SUN4V_ERR_TYPE_WARNING_RES:
1793 return "warning resumable";
1794 default:
1795 return "unknown";
1796 };
1797}
1798
1799static void sun4v_log_error(struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
1800{
1801 int cnt;
1802
1803 printk("%s: Reporting on cpu %d\n", pfx, cpu);
1804 printk("%s: err_handle[%lx] err_stick[%lx] err_type[%08x:%s]\n",
1805 pfx,
1806 ent->err_handle, ent->err_stick,
1807 ent->err_type,
1808 sun4v_err_type_to_str(ent->err_type));
1809 printk("%s: err_attrs[%08x:%s %s %s %s %s %s %s %s]\n",
1810 pfx,
1811 ent->err_attrs,
1812 ((ent->err_attrs & SUN4V_ERR_ATTRS_PROCESSOR) ?
1813 "processor" : ""),
1814 ((ent->err_attrs & SUN4V_ERR_ATTRS_MEMORY) ?
1815 "memory" : ""),
1816 ((ent->err_attrs & SUN4V_ERR_ATTRS_PIO) ?
1817 "pio" : ""),
1818 ((ent->err_attrs & SUN4V_ERR_ATTRS_INT_REGISTERS) ?
1819 "integer-regs" : ""),
1820 ((ent->err_attrs & SUN4V_ERR_ATTRS_FPU_REGISTERS) ?
1821 "fpu-regs" : ""),
1822 ((ent->err_attrs & SUN4V_ERR_ATTRS_USER_MODE) ?
1823 "user" : ""),
1824 ((ent->err_attrs & SUN4V_ERR_ATTRS_PRIV_MODE) ?
1825 "privileged" : ""),
1826 ((ent->err_attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL) ?
1827 "queue-full" : ""));
1828 printk("%s: err_raddr[%016lx] err_size[%u] err_cpu[%u]\n",
1829 pfx,
1830 ent->err_raddr, ent->err_size, ent->err_cpu);
1831
1832 if ((cnt = atomic_read(ocnt)) != 0) {
1833 atomic_set(ocnt, 0);
1834 wmb();
1835 printk("%s: Queue overflowed %d times.\n",
1836 pfx, cnt);
1837 }
1838}
1839
1840/* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
1841 * Log the event and clear the first word of the entry.
1842 */
1843void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
1844{
1845 struct sun4v_error_entry *ent, local_copy;
1846 struct trap_per_cpu *tb;
1847 unsigned long paddr;
1848 int cpu;
1849
1850 cpu = get_cpu();
1851
1852 tb = &trap_block[cpu];
1853 paddr = tb->resum_kernel_buf_pa + offset;
1854 ent = __va(paddr);
1855
1856 memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
1857
1858 /* We have a local copy now, so release the entry. */
1859 ent->err_handle = 0;
1860 wmb();
1861
1862 put_cpu();
1863
1864 sun4v_log_error(&local_copy, cpu,
1865 KERN_ERR "RESUMABLE ERROR",
1866 &sun4v_resum_oflow_cnt);
1867}
1868
1869/* If we try to printk() we'll probably make matters worse, by trying
1870 * to retake locks this cpu already holds or causing more errors. So
1871 * just bump a counter, and we'll report these counter bumps above.
1872 */
1873void sun4v_resum_overflow(struct pt_regs *regs)
1874{
1875 atomic_inc(&sun4v_resum_oflow_cnt);
1876}
1877
1878/* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
1879 * Log the event, clear the first word of the entry, and die.
1880 */
1881void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
1882{
1883 struct sun4v_error_entry *ent, local_copy;
1884 struct trap_per_cpu *tb;
1885 unsigned long paddr;
1886 int cpu;
1887
1888 cpu = get_cpu();
1889
1890 tb = &trap_block[cpu];
1891 paddr = tb->nonresum_kernel_buf_pa + offset;
1892 ent = __va(paddr);
1893
1894 memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
1895
1896 /* We have a local copy now, so release the entry. */
1897 ent->err_handle = 0;
1898 wmb();
1899
1900 put_cpu();
1901
1902#ifdef CONFIG_PCI
1903 /* Check for the special PCI poke sequence. */
1904 if (pci_poke_in_progress && pci_poke_cpu == cpu) {
1905 pci_poke_faulted = 1;
1906 regs->tpc += 4;
1907 regs->tnpc = regs->tpc + 4;
1908 return;
1909 }
1910#endif
1911
1912 sun4v_log_error(&local_copy, cpu,
1913 KERN_EMERG "NON-RESUMABLE ERROR",
1914 &sun4v_nonresum_oflow_cnt);
1915
1916 panic("Non-resumable error.");
1917}
1918
1919/* If we try to printk() we'll probably make matters worse, by trying
1920 * to retake locks this cpu already holds or causing more errors. So
1921 * just bump a counter, and we'll report these counter bumps above.
1922 */
1923void sun4v_nonresum_overflow(struct pt_regs *regs)
1924{
1925 /* XXX Actually even this can make not that much sense. Perhaps
1926 * XXX we should just pull the plug and panic directly from here?
1927 */
1928 atomic_inc(&sun4v_nonresum_oflow_cnt);
1929}
1930
1931unsigned long sun4v_err_itlb_vaddr;
1932unsigned long sun4v_err_itlb_ctx;
1933unsigned long sun4v_err_itlb_pte;
1934unsigned long sun4v_err_itlb_error;
1935
1936void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
1937{
1938 if (tl > 1)
1939 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
1940
1941 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
1942 regs->tpc, tl);
1943 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
1944 "pte[%lx] error[%lx]\n",
1945 sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx,
1946 sun4v_err_itlb_pte, sun4v_err_itlb_error);
1947
1948 prom_halt();
1949}
1950
1951unsigned long sun4v_err_dtlb_vaddr;
1952unsigned long sun4v_err_dtlb_ctx;
1953unsigned long sun4v_err_dtlb_pte;
1954unsigned long sun4v_err_dtlb_error;
1955
1956void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
1957{
1958 if (tl > 1)
1959 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
1960
1961 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
1962 regs->tpc, tl);
1963 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
1964 "pte[%lx] error[%lx]\n",
1965 sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx,
1966 sun4v_err_dtlb_pte, sun4v_err_dtlb_error);
1967
1968 prom_halt();
1969}
1970
1971void hypervisor_tlbop_error(unsigned long err, unsigned long op)
1972{
1973 printk(KERN_CRIT "SUN4V: TLB hv call error %lu for op %lu\n",
1974 err, op);
1975}
1976
1977void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op)
1978{
1979 printk(KERN_CRIT "SUN4V: XCALL TLB hv call error %lu for op %lu\n",
1980 err, op);
1981}
1982
1669void do_fpe_common(struct pt_regs *regs) 1983void do_fpe_common(struct pt_regs *regs)
1670{ 1984{
1671 if (regs->tstate & TSTATE_PRIV) { 1985 if (regs->tstate & TSTATE_PRIV) {
@@ -1924,10 +2238,11 @@ void die_if_kernel(char *str, struct pt_regs *regs)
1924 } 2238 }
1925 user_instruction_dump ((unsigned int __user *) regs->tpc); 2239 user_instruction_dump ((unsigned int __user *) regs->tpc);
1926 } 2240 }
2241#if 0
1927#ifdef CONFIG_SMP 2242#ifdef CONFIG_SMP
1928 smp_report_regs(); 2243 smp_report_regs();
1929#endif 2244#endif
1930 2245#endif
1931 if (regs->tstate & TSTATE_PRIV) 2246 if (regs->tstate & TSTATE_PRIV)
1932 do_exit(SIGKILL); 2247 do_exit(SIGKILL);
1933 do_exit(SIGSEGV); 2248 do_exit(SIGSEGV);
@@ -1958,6 +2273,11 @@ void do_illegal_instruction(struct pt_regs *regs)
1958 } else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ { 2273 } else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
1959 if (handle_ldf_stq(insn, regs)) 2274 if (handle_ldf_stq(insn, regs))
1960 return; 2275 return;
2276 } else if (tlb_type == hypervisor) {
2277 extern int vis_emul(struct pt_regs *, unsigned int);
2278
2279 if (!vis_emul(regs, insn))
2280 return;
1961 } 2281 }
1962 } 2282 }
1963 info.si_signo = SIGILL; 2283 info.si_signo = SIGILL;
@@ -1968,6 +2288,8 @@ void do_illegal_instruction(struct pt_regs *regs)
1968 force_sig_info(SIGILL, &info, current); 2288 force_sig_info(SIGILL, &info, current);
1969} 2289}
1970 2290
2291extern void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn);
2292
1971void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr) 2293void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
1972{ 2294{
1973 siginfo_t info; 2295 siginfo_t info;
@@ -1977,13 +2299,7 @@ void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned lo
1977 return; 2299 return;
1978 2300
1979 if (regs->tstate & TSTATE_PRIV) { 2301 if (regs->tstate & TSTATE_PRIV) {
1980 extern void kernel_unaligned_trap(struct pt_regs *regs, 2302 kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
1981 unsigned int insn,
1982 unsigned long sfar,
1983 unsigned long sfsr);
1984
1985 kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc),
1986 sfar, sfsr);
1987 return; 2303 return;
1988 } 2304 }
1989 info.si_signo = SIGBUS; 2305 info.si_signo = SIGBUS;
@@ -1994,6 +2310,26 @@ void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned lo
1994 force_sig_info(SIGBUS, &info, current); 2310 force_sig_info(SIGBUS, &info, current);
1995} 2311}
1996 2312
2313void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
2314{
2315 siginfo_t info;
2316
2317 if (notify_die(DIE_TRAP, "memory address unaligned", regs,
2318 0, 0x34, SIGSEGV) == NOTIFY_STOP)
2319 return;
2320
2321 if (regs->tstate & TSTATE_PRIV) {
2322 kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
2323 return;
2324 }
2325 info.si_signo = SIGBUS;
2326 info.si_errno = 0;
2327 info.si_code = BUS_ADRALN;
2328 info.si_addr = (void __user *) addr;
2329 info.si_trapno = 0;
2330 force_sig_info(SIGBUS, &info, current);
2331}
2332
1997void do_privop(struct pt_regs *regs) 2333void do_privop(struct pt_regs *regs)
1998{ 2334{
1999 siginfo_t info; 2335 siginfo_t info;
@@ -2130,7 +2466,22 @@ void do_getpsr(struct pt_regs *regs)
2130 } 2466 }
2131} 2467}
2132 2468
2469struct trap_per_cpu trap_block[NR_CPUS];
2470
2471/* This can get invoked before sched_init() so play it super safe
2472 * and use hard_smp_processor_id().
2473 */
2474void init_cur_cpu_trap(struct thread_info *t)
2475{
2476 int cpu = hard_smp_processor_id();
2477 struct trap_per_cpu *p = &trap_block[cpu];
2478
2479 p->thread = t;
2480 p->pgd_paddr = 0;
2481}
2482
2133extern void thread_info_offsets_are_bolixed_dave(void); 2483extern void thread_info_offsets_are_bolixed_dave(void);
2484extern void trap_per_cpu_offsets_are_bolixed_dave(void);
2134 2485
2135/* Only invoked on boot processor. */ 2486/* Only invoked on boot processor. */
2136void __init trap_init(void) 2487void __init trap_init(void)
@@ -2154,7 +2505,6 @@ void __init trap_init(void)
2154 TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) || 2505 TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
2155 TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) || 2506 TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
2156 TI_PCR != offsetof(struct thread_info, pcr_reg) || 2507 TI_PCR != offsetof(struct thread_info, pcr_reg) ||
2157 TI_CEE_STUFF != offsetof(struct thread_info, cee_stuff) ||
2158 TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) || 2508 TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
2159 TI_NEW_CHILD != offsetof(struct thread_info, new_child) || 2509 TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
2160 TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) || 2510 TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
@@ -2165,6 +2515,29 @@ void __init trap_init(void)
2165 (TI_FPREGS & (64 - 1))) 2515 (TI_FPREGS & (64 - 1)))
2166 thread_info_offsets_are_bolixed_dave(); 2516 thread_info_offsets_are_bolixed_dave();
2167 2517
2518 if (TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, thread) ||
2519 (TRAP_PER_CPU_PGD_PADDR !=
2520 offsetof(struct trap_per_cpu, pgd_paddr)) ||
2521 (TRAP_PER_CPU_CPU_MONDO_PA !=
2522 offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
2523 (TRAP_PER_CPU_DEV_MONDO_PA !=
2524 offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
2525 (TRAP_PER_CPU_RESUM_MONDO_PA !=
2526 offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
2527 (TRAP_PER_CPU_RESUM_KBUF_PA !=
2528 offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
2529 (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
2530 offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
2531 (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
2532 offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
2533 (TRAP_PER_CPU_FAULT_INFO !=
2534 offsetof(struct trap_per_cpu, fault_info)) ||
2535 (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
2536 offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
2537 (TRAP_PER_CPU_CPU_LIST_PA !=
2538 offsetof(struct trap_per_cpu, cpu_list_pa)))
2539 trap_per_cpu_offsets_are_bolixed_dave();
2540
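The checks above call deliberately undefined functions, so the kernel only links if the hand-maintained assembler offsets still match the C structures. The same per-field invariants could instead be stated as compile-time assertions, assuming BUILD_BUG_ON() is available in this tree; a sketch, not what the patch does:

	BUILD_BUG_ON(TRAP_PER_CPU_THREAD    != offsetof(struct trap_per_cpu, thread));
	BUILD_BUG_ON(TRAP_PER_CPU_PGD_PADDR != offsetof(struct trap_per_cpu, pgd_paddr));
	/* ...and likewise for the remaining TRAP_PER_CPU_* offsets. */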
2168 /* Attach to the address space of init_task. On SMP we 2541 /* Attach to the address space of init_task. On SMP we
2169 * do this in smp.c:smp_callin for other cpus. 2542 * do this in smp.c:smp_callin for other cpus.
2170 */ 2543 */
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
new file mode 100644
index 000000000000..118baea44f69
--- /dev/null
+++ b/arch/sparc64/kernel/tsb.S
@@ -0,0 +1,442 @@
1/* tsb.S: Sparc64 TSB table handling.
2 *
3 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
4 */
5
6#include <asm/tsb.h>
7#include <asm/hypervisor.h>
8
9 .text
10 .align 32
11
12 /* Invoked from TLB miss handler, we are in the
13 * MMU global registers and they are setup like
14 * this:
15 *
16 * %g1: TSB entry pointer
17 * %g2: available temporary
18 * %g3: FAULT_CODE_{D,I}TLB
19 * %g4: available temporary
20 * %g5: available temporary
21 * %g6: TAG TARGET
22 * %g7: available temporary, will be loaded by us with
23 * the physical address base of the linux page
24 * tables for the current address space
25 */
26tsb_miss_dtlb:
27 mov TLB_TAG_ACCESS, %g4
28 ba,pt %xcc, tsb_miss_page_table_walk
29 ldxa [%g4] ASI_DMMU, %g4
30
31tsb_miss_itlb:
32 mov TLB_TAG_ACCESS, %g4
33 ba,pt %xcc, tsb_miss_page_table_walk
34 ldxa [%g4] ASI_IMMU, %g4
35
36 /* At this point we have:
37 * %g1 -- TSB entry address
38 * %g3 -- FAULT_CODE_{D,I}TLB
39 * %g4 -- missing virtual address
40 * %g6 -- TAG TARGET (vaddr >> 22)
41 */
42tsb_miss_page_table_walk:
43 TRAP_LOAD_PGD_PHYS(%g7, %g5)
44
45 /* And now we have the PGD base physical address in %g7. */
46tsb_miss_page_table_walk_sun4v_fastpath:
47 USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)
48
49 /* At this point we have:
50 * %g1 -- TSB entry address
51 * %g3 -- FAULT_CODE_{D,I}TLB
52 * %g5 -- physical address of PTE in Linux page tables
53 * %g6 -- TAG TARGET (vaddr >> 22)
54 */
55tsb_reload:
56 TSB_LOCK_TAG(%g1, %g2, %g7)
57
58 /* Load and check PTE. */
59 ldxa [%g5] ASI_PHYS_USE_EC, %g5
60 mov 1, %g7
61 sllx %g7, TSB_TAG_INVALID_BIT, %g7
62 brgez,a,pn %g5, tsb_do_fault
63 TSB_STORE(%g1, %g7)
64
65 TSB_WRITE(%g1, %g5, %g6)
66
67 /* Finally, load TLB and return from trap. */
68tsb_tlb_reload:
69 cmp %g3, FAULT_CODE_DTLB
70 bne,pn %xcc, tsb_itlb_load
71 nop
72
73tsb_dtlb_load:
74
75661: stxa %g5, [%g0] ASI_DTLB_DATA_IN
76 retry
77 .section .sun4v_2insn_patch, "ax"
78 .word 661b
79 nop
80 nop
81 .previous
82
83 /* For sun4v the ASI_DTLB_DATA_IN store and the retry
84 * instruction get nop'd out and we get here to branch
85 * to the sun4v tlb load code. The registers are setup
86 * as follows:
87 *
88 * %g4: vaddr
89 * %g5: PTE
90 * %g6: TAG
91 *
92 * The sun4v TLB load wants the PTE in %g3 so we fix that
93 * up here.
94 */
95 ba,pt %xcc, sun4v_dtlb_load
96 mov %g5, %g3
97
98tsb_itlb_load:
99 /* Executable bit must be set. */
100661: andcc %g5, _PAGE_EXEC_4U, %g0
101 .section .sun4v_1insn_patch, "ax"
102 .word 661b
103 andcc %g5, _PAGE_EXEC_4V, %g0
104 .previous
105
106 be,pn %xcc, tsb_do_fault
107 nop
108
109661: stxa %g5, [%g0] ASI_ITLB_DATA_IN
110 retry
111 .section .sun4v_2insn_patch, "ax"
112 .word 661b
113 nop
114 nop
115 .previous
116
117 /* For sun4v the ASI_ITLB_DATA_IN store and the retry
118 * instruction get nop'd out and we get here to branch
119 * to the sun4v tlb load code. The registers are setup
120 * as follows:
121 *
122 * %g4: vaddr
123 * %g5: PTE
124 * %g6: TAG
125 *
126 * The sun4v TLB load wants the PTE in %g3 so we fix that
127 * up here.
128 */
129 ba,pt %xcc, sun4v_itlb_load
130 mov %g5, %g3
131
132 /* No valid entry in the page tables, do full fault
133 * processing.
134 */
135
136 .globl tsb_do_fault
137tsb_do_fault:
138 cmp %g3, FAULT_CODE_DTLB
139
140661: rdpr %pstate, %g5
141 wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate
142 .section .sun4v_2insn_patch, "ax"
143 .word 661b
144 SET_GL(1)
145 ldxa [%g0] ASI_SCRATCHPAD, %g4
146 .previous
147
148 bne,pn %xcc, tsb_do_itlb_fault
149 nop
150
151tsb_do_dtlb_fault:
152 rdpr %tl, %g3
153 cmp %g3, 1
154
155661: mov TLB_TAG_ACCESS, %g4
156 ldxa [%g4] ASI_DMMU, %g5
157 .section .sun4v_2insn_patch, "ax"
158 .word 661b
159 ldx [%g4 + HV_FAULT_D_ADDR_OFFSET], %g5
160 nop
161 .previous
162
163 be,pt %xcc, sparc64_realfault_common
164 mov FAULT_CODE_DTLB, %g4
165 ba,pt %xcc, winfix_trampoline
166 nop
167
168tsb_do_itlb_fault:
169 rdpr %tpc, %g5
170 ba,pt %xcc, sparc64_realfault_common
171 mov FAULT_CODE_ITLB, %g4
172
173 .globl sparc64_realfault_common
174sparc64_realfault_common:
175 /* fault code in %g4, fault address in %g5, etrap will
176 * preserve these two values in %l4 and %l5 respectively
177 */
178 ba,pt %xcc, etrap ! Save trap state
1791: rd %pc, %g7 ! ...
180 stb %l4, [%g6 + TI_FAULT_CODE] ! Save fault code
181 stx %l5, [%g6 + TI_FAULT_ADDR] ! Save fault address
182 call do_sparc64_fault ! Call fault handler
183 add %sp, PTREGS_OFF, %o0 ! Compute pt_regs arg
184 ba,pt %xcc, rtrap_clr_l6 ! Restore cpu state
185 nop ! Delay slot (fill me)
186
187winfix_trampoline:
188 rdpr %tpc, %g3 ! Prepare winfixup TNPC
189 or %g3, 0x7c, %g3 ! Compute branch offset
190 wrpr %g3, %tnpc ! Write it into TNPC
191 done ! Trap return
192
193 /* Insert an entry into the TSB.
194 *
195 * %o0: TSB entry pointer (virt or phys address)
196 * %o1: tag
197 * %o2: pte
198 */
199 .align 32
200 .globl __tsb_insert
201__tsb_insert:
202 rdpr %pstate, %o5
203 wrpr %o5, PSTATE_IE, %pstate
204 TSB_LOCK_TAG(%o0, %g2, %g3)
205 TSB_WRITE(%o0, %o2, %o1)
206 wrpr %o5, %pstate
207 retl
208 nop
209 .size __tsb_insert, .-__tsb_insert
210
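For reference while reading these routines: a TSB entry is a 16-byte (tag, tte) pair, and the tag holds the lock and invalid bits plus the tag target (vaddr >> 22). The write protocol locks the tag first, stores the TTE, then publishes the real tag, so a concurrent miss handler never matches a tag whose TTE is stale. A C-level sketch of that ordering (names are illustrative; the real code also relies on the store ordering that TSB_LOCK_TAG()/TSB_WRITE()/TSB_MEMBAR provide):

	struct tsb_entry {
		unsigned long tag;	/* lock/invalid bits + (vaddr >> 22) */
		unsigned long tte;	/* translation handed to the TLB on a hit */
	};

	static void tsb_write_sketch(struct tsb_entry *ent,
				     unsigned long lock_tag,	/* tag with lock bit set */
				     unsigned long tag, unsigned long tte)
	{
		ent->tag = lock_tag;	/* 1: lock - miss handlers skip the entry */
		ent->tte = tte;		/* 2: install the new translation */
		ent->tag = tag;		/* 3: publish the tag, making it matchable */
	}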
211 /* Flush the given TSB entry if it has the matching
212 * tag.
213 *
214 * %o0: TSB entry pointer (virt or phys address)
215 * %o1: tag
216 */
217 .align 32
218 .globl tsb_flush
219 .type tsb_flush,#function
220tsb_flush:
221 sethi %hi(TSB_TAG_LOCK_HIGH), %g2
2221: TSB_LOAD_TAG(%o0, %g1)
223 srlx %g1, 32, %o3
224 andcc %o3, %g2, %g0
225 bne,pn %icc, 1b
226 membar #LoadLoad
227 cmp %g1, %o1
228 mov 1, %o3
229 bne,pt %xcc, 2f
230 sllx %o3, TSB_TAG_INVALID_BIT, %o3
231 TSB_CAS_TAG(%o0, %g1, %o3)
232 cmp %g1, %o3
233 bne,pn %xcc, 1b
234 nop
2352: retl
236 TSB_MEMBAR
237 .size tsb_flush, .-tsb_flush
238
239 /* Reload MMU related context switch state at
240 * schedule() time.
241 *
242 * %o0: page table physical address
243 * %o1: TSB register value
244 * %o2: TSB virtual address
245 * %o3: TSB mapping locked PTE
246 * %o4: Hypervisor TSB descriptor physical address
247 *
248 * We have to run this whole thing with interrupts
249 * disabled so that the current cpu doesn't change
250 * due to preemption.
251 */
252 .align 32
253 .globl __tsb_context_switch
254 .type __tsb_context_switch,#function
255__tsb_context_switch:
256 rdpr %pstate, %o5
257 wrpr %o5, PSTATE_IE, %pstate
258
259 ldub [%g6 + TI_CPU], %g1
260 sethi %hi(trap_block), %g2
261 sllx %g1, TRAP_BLOCK_SZ_SHIFT, %g1
262 or %g2, %lo(trap_block), %g2
263 add %g2, %g1, %g2
264 stx %o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
265
266 sethi %hi(tlb_type), %g1
267 lduw [%g1 + %lo(tlb_type)], %g1
268 cmp %g1, 3
269 bne,pt %icc, 1f
270 nop
271
272 /* Hypervisor TSB switch. */
273 mov SCRATCHPAD_UTSBREG1, %g1
274 stxa %o1, [%g1] ASI_SCRATCHPAD
275 mov -1, %g2
276 mov SCRATCHPAD_UTSBREG2, %g1
277 stxa %g2, [%g1] ASI_SCRATCHPAD
278
279 /* Save away %o5's %pstate, we have to use %o5 for
280 * the hypervisor call.
281 */
282 mov %o5, %g1
283
284 mov HV_FAST_MMU_TSB_CTXNON0, %o5
285 mov 1, %o0
286 mov %o4, %o1
287 ta HV_FAST_TRAP
288
289 /* Finish up and restore %o5. */
290 ba,pt %xcc, 9f
291 mov %g1, %o5
292
293 /* SUN4U TSB switch. */
2941: mov TSB_REG, %g1
295 stxa %o1, [%g1] ASI_DMMU
296 membar #Sync
297 stxa %o1, [%g1] ASI_IMMU
298 membar #Sync
299
3002: brz %o2, 9f
301 nop
302
303 sethi %hi(sparc64_highest_unlocked_tlb_ent), %g2
304 mov TLB_TAG_ACCESS, %g1
305 lduw [%g2 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2
306 stxa %o2, [%g1] ASI_DMMU
307 membar #Sync
308 sllx %g2, 3, %g2
309 stxa %o3, [%g2] ASI_DTLB_DATA_ACCESS
310 membar #Sync
3119:
312 wrpr %o5, %pstate
313
314 retl
315 nop
316 .size __tsb_context_switch, .-__tsb_context_switch
317
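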
318#define TSB_PASS_BITS ((1 << TSB_TAG_LOCK_BIT) | \
319 (1 << TSB_TAG_INVALID_BIT))
320
321 .align 32
322 .globl copy_tsb
323 .type copy_tsb,#function
324copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
325 * %o2=new_tsb_base, %o3=new_tsb_size
326 */
327 sethi %uhi(TSB_PASS_BITS), %g7
328 srlx %o3, 4, %o3
329 add %o0, %o1, %g1 /* end of old tsb */
330 sllx %g7, 32, %g7
331 sub %o3, 1, %o3 /* %o3 == new tsb hash mask */
332
333661: prefetcha [%o0] ASI_N, #one_read
334 .section .tsb_phys_patch, "ax"
335 .word 661b
336 prefetcha [%o0] ASI_PHYS_USE_EC, #one_read
337 .previous
338
33990: andcc %o0, (64 - 1), %g0
340 bne 1f
341 add %o0, 64, %o5
342
343661: prefetcha [%o5] ASI_N, #one_read
344 .section .tsb_phys_patch, "ax"
345 .word 661b
346 prefetcha [%o5] ASI_PHYS_USE_EC, #one_read
347 .previous
348
3491: TSB_LOAD_QUAD(%o0, %g2) /* %g2/%g3 == TSB entry */
350 andcc %g2, %g7, %g0 /* LOCK or INVALID set? */
351 bne,pn %xcc, 80f /* Skip it */
352 sllx %g2, 22, %o4 /* TAG --> VADDR */
353
354 /* This can definitely be computed faster... */
355 srlx %o0, 4, %o5 /* Build index */
356 and %o5, 511, %o5 /* Mask index */
357 sllx %o5, PAGE_SHIFT, %o5 /* Put into vaddr position */
358 or %o4, %o5, %o4 /* Full VADDR. */
359 srlx %o4, PAGE_SHIFT, %o4 /* Shift down to create index */
360 and %o4, %o3, %o4 /* Mask with new_tsb_nents-1 */
361 sllx %o4, 4, %o4 /* Shift back up into tsb ent offset */
362 TSB_STORE(%o2 + %o4, %g2) /* Store TAG */
363 add %o4, 0x8, %o4 /* Advance to TTE */
364 TSB_STORE(%o2 + %o4, %g3) /* Store TTE */
365
36680: add %o0, 16, %o0
367 cmp %o0, %g1
368 bne,pt %xcc, 90b
369 nop
370
371 retl
372 TSB_MEMBAR
373 .size copy_tsb, .-copy_tsb
374
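
The "can definitely be computed faster" index arithmetic in copy_tsb is easier to follow in C. A sketch assuming 16-byte entries, a tag that holds vaddr >> 22, and a 512-entry old TSB (the `and %o5, 511` above); the function name is illustrative:

	/* Where an old-TSB entry lands in the new TSB (sketch). */
	static unsigned long copy_tsb_new_offset(unsigned long old_offset,
						 unsigned long tag,
						 unsigned long new_nents,
						 unsigned long page_shift)
	{
		unsigned long vaddr, new_index;

		/* Rebuild the virtual address: high bits from the tag,
		 * low bits from the entry's slot in the old table.
		 */
		vaddr  = tag << 22;
		vaddr |= ((old_offset >> 4) & 511) << page_shift;

		/* Hash into the new table; new_nents is a power of two. */
		new_index = (vaddr >> page_shift) & (new_nents - 1);

		return new_index << 4;	/* 16 bytes per entry */
	}
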
375 /* Set the invalid bit in all TSB entries. */
376 .align 32
377 .globl tsb_init
378 .type tsb_init,#function
379tsb_init: /* %o0 = TSB vaddr, %o1 = size in bytes */
380 prefetch [%o0 + 0x000], #n_writes
381 mov 1, %g1
382 prefetch [%o0 + 0x040], #n_writes
383 sllx %g1, TSB_TAG_INVALID_BIT, %g1
384 prefetch [%o0 + 0x080], #n_writes
3851: prefetch [%o0 + 0x0c0], #n_writes
386 stx %g1, [%o0 + 0x00]
387 stx %g1, [%o0 + 0x10]
388 stx %g1, [%o0 + 0x20]
389 stx %g1, [%o0 + 0x30]
390 prefetch [%o0 + 0x100], #n_writes
391 stx %g1, [%o0 + 0x40]
392 stx %g1, [%o0 + 0x50]
393 stx %g1, [%o0 + 0x60]
394 stx %g1, [%o0 + 0x70]
395 prefetch [%o0 + 0x140], #n_writes
396 stx %g1, [%o0 + 0x80]
397 stx %g1, [%o0 + 0x90]
398 stx %g1, [%o0 + 0xa0]
399 stx %g1, [%o0 + 0xb0]
400 prefetch [%o0 + 0x180], #n_writes
401 stx %g1, [%o0 + 0xc0]
402 stx %g1, [%o0 + 0xd0]
403 stx %g1, [%o0 + 0xe0]
404 stx %g1, [%o0 + 0xf0]
405 subcc %o1, 0x100, %o1
406 bne,pt %xcc, 1b
407 add %o0, 0x100, %o0
408 retl
409 nop
410 nop
411 nop
412 .size tsb_init, .-tsb_init
413
414 .globl NGtsb_init
415 .type NGtsb_init,#function
416NGtsb_init:
417 rd %asi, %g2
418 mov 1, %g1
419 wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi
420 sllx %g1, TSB_TAG_INVALID_BIT, %g1
4211: stxa %g1, [%o0 + 0x00] %asi
422 stxa %g1, [%o0 + 0x10] %asi
423 stxa %g1, [%o0 + 0x20] %asi
424 stxa %g1, [%o0 + 0x30] %asi
425 stxa %g1, [%o0 + 0x40] %asi
426 stxa %g1, [%o0 + 0x50] %asi
427 stxa %g1, [%o0 + 0x60] %asi
428 stxa %g1, [%o0 + 0x70] %asi
429 stxa %g1, [%o0 + 0x80] %asi
430 stxa %g1, [%o0 + 0x90] %asi
431 stxa %g1, [%o0 + 0xa0] %asi
432 stxa %g1, [%o0 + 0xb0] %asi
433 stxa %g1, [%o0 + 0xc0] %asi
434 stxa %g1, [%o0 + 0xd0] %asi
435 stxa %g1, [%o0 + 0xe0] %asi
436 stxa %g1, [%o0 + 0xf0] %asi
437 subcc %o1, 0x100, %o1
438 bne,pt %xcc, 1b
439 add %o0, 0x100, %o0
440 retl
441 wr %g2, 0x0, %asi
442 .size NGtsb_init, .-NGtsb_init
diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S
index 8365bc1f81f3..5d901519db55 100644
--- a/arch/sparc64/kernel/ttable.S
+++ b/arch/sparc64/kernel/ttable.S
@@ -1,7 +1,6 @@
1/* $Id: ttable.S,v 1.38 2002/02/09 19:49:30 davem Exp $ 1/* ttable.S: Sparc V9 Trap Table(s) with SpitFire/Cheetah/SUN4V extensions.
2 * ttable.S: Sparc V9 Trap Table(s) with SpitFire/Cheetah extensions.
3 * 2 *
4 * Copyright (C) 1996, 2001 David S. Miller (davem@caip.rutgers.edu) 3 * Copyright (C) 1996, 2001, 2006 David S. Miller (davem@davemloft.net)
5 */ 4 */
6 5
7#include <linux/config.h> 6#include <linux/config.h>
@@ -19,7 +18,7 @@ tl0_resv000: BOOT_KERNEL BTRAP(0x1) BTRAP(0x2) BTRAP(0x3)
19tl0_resv004: BTRAP(0x4) BTRAP(0x5) BTRAP(0x6) BTRAP(0x7) 18tl0_resv004: BTRAP(0x4) BTRAP(0x5) BTRAP(0x6) BTRAP(0x7)
20tl0_iax: membar #Sync 19tl0_iax: membar #Sync
21 TRAP_NOSAVE_7INSNS(__spitfire_insn_access_exception) 20 TRAP_NOSAVE_7INSNS(__spitfire_insn_access_exception)
22tl0_resv009: BTRAP(0x9) 21tl0_itsb_4v: SUN4V_ITSB_MISS
23tl0_iae: membar #Sync 22tl0_iae: membar #Sync
24 TRAP_NOSAVE_7INSNS(__spitfire_access_error) 23 TRAP_NOSAVE_7INSNS(__spitfire_access_error)
25tl0_resv00b: BTRAP(0xb) BTRAP(0xc) BTRAP(0xd) BTRAP(0xe) BTRAP(0xf) 24tl0_resv00b: BTRAP(0xb) BTRAP(0xc) BTRAP(0xd) BTRAP(0xe) BTRAP(0xf)
@@ -38,7 +37,7 @@ tl0_div0: TRAP(do_div0)
38tl0_resv029: BTRAP(0x29) BTRAP(0x2a) BTRAP(0x2b) BTRAP(0x2c) BTRAP(0x2d) BTRAP(0x2e) 37tl0_resv029: BTRAP(0x29) BTRAP(0x2a) BTRAP(0x2b) BTRAP(0x2c) BTRAP(0x2d) BTRAP(0x2e)
39tl0_resv02f: BTRAP(0x2f) 38tl0_resv02f: BTRAP(0x2f)
40tl0_dax: TRAP_NOSAVE(__spitfire_data_access_exception) 39tl0_dax: TRAP_NOSAVE(__spitfire_data_access_exception)
41tl0_resv031: BTRAP(0x31) 40tl0_dtsb_4v: SUN4V_DTSB_MISS
42tl0_dae: membar #Sync 41tl0_dae: membar #Sync
43 TRAP_NOSAVE_7INSNS(__spitfire_access_error) 42 TRAP_NOSAVE_7INSNS(__spitfire_access_error)
44tl0_resv033: BTRAP(0x33) 43tl0_resv033: BTRAP(0x33)
@@ -52,12 +51,13 @@ tl0_resv03e: BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40)
52tl0_irq1: TRAP_IRQ(smp_call_function_client, 1) 51tl0_irq1: TRAP_IRQ(smp_call_function_client, 1)
53tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2) 52tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2)
54tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3) 53tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3)
54tl0_irq4: TRAP_IRQ(smp_new_mmu_context_version_client, 4)
55#else 55#else
56tl0_irq1: BTRAP(0x41) 56tl0_irq1: BTRAP(0x41)
57tl0_irq2: BTRAP(0x42) 57tl0_irq2: BTRAP(0x42)
58tl0_irq3: BTRAP(0x43) 58tl0_irq3: BTRAP(0x43)
59tl0_irq4: BTRAP(0x44)
59#endif 60#endif
60tl0_irq4: TRAP_IRQ(handler_irq, 4)
61tl0_irq5: TRAP_IRQ(handler_irq, 5) TRAP_IRQ(handler_irq, 6) 61tl0_irq5: TRAP_IRQ(handler_irq, 5) TRAP_IRQ(handler_irq, 6)
62tl0_irq7: TRAP_IRQ(handler_irq, 7) TRAP_IRQ(handler_irq, 8) 62tl0_irq7: TRAP_IRQ(handler_irq, 7) TRAP_IRQ(handler_irq, 8)
63tl0_irq9: TRAP_IRQ(handler_irq, 9) TRAP_IRQ(handler_irq, 10) 63tl0_irq9: TRAP_IRQ(handler_irq, 9) TRAP_IRQ(handler_irq, 10)
@@ -78,9 +78,9 @@ tl0_vaw: TRAP(do_vaw)
78tl0_cee: membar #Sync 78tl0_cee: membar #Sync
79 TRAP_NOSAVE_7INSNS(__spitfire_cee_trap) 79 TRAP_NOSAVE_7INSNS(__spitfire_cee_trap)
80tl0_iamiss: 80tl0_iamiss:
81#include "itlb_base.S" 81#include "itlb_miss.S"
82tl0_damiss: 82tl0_damiss:
83#include "dtlb_base.S" 83#include "dtlb_miss.S"
84tl0_daprot: 84tl0_daprot:
85#include "dtlb_prot.S" 85#include "dtlb_prot.S"
86tl0_fecc: BTRAP(0x70) /* Fast-ECC on Cheetah */ 86tl0_fecc: BTRAP(0x70) /* Fast-ECC on Cheetah */
@@ -88,15 +88,18 @@ tl0_dcpe: BTRAP(0x71) /* D-cache Parity Error on Cheetah+ */
88tl0_icpe: BTRAP(0x72) /* I-cache Parity Error on Cheetah+ */ 88tl0_icpe: BTRAP(0x72) /* I-cache Parity Error on Cheetah+ */
89tl0_resv073: BTRAP(0x73) BTRAP(0x74) BTRAP(0x75) 89tl0_resv073: BTRAP(0x73) BTRAP(0x74) BTRAP(0x75)
90tl0_resv076: BTRAP(0x76) BTRAP(0x77) BTRAP(0x78) BTRAP(0x79) BTRAP(0x7a) BTRAP(0x7b) 90tl0_resv076: BTRAP(0x76) BTRAP(0x77) BTRAP(0x78) BTRAP(0x79) BTRAP(0x7a) BTRAP(0x7b)
91tl0_resv07c: BTRAP(0x7c) BTRAP(0x7d) BTRAP(0x7e) BTRAP(0x7f) 91tl0_cpu_mondo: TRAP_NOSAVE(sun4v_cpu_mondo)
92tl0_dev_mondo: TRAP_NOSAVE(sun4v_dev_mondo)
93tl0_res_mondo: TRAP_NOSAVE(sun4v_res_mondo)
94tl0_nres_mondo: TRAP_NOSAVE(sun4v_nonres_mondo)
92tl0_s0n: SPILL_0_NORMAL 95tl0_s0n: SPILL_0_NORMAL
93tl0_s1n: SPILL_1_NORMAL 96tl0_s1n: SPILL_1_NORMAL
94tl0_s2n: SPILL_2_NORMAL 97tl0_s2n: SPILL_2_NORMAL
95tl0_s3n: SPILL_3_NORMAL 98tl0_s3n: SPILL_0_NORMAL_ETRAP
96tl0_s4n: SPILL_4_NORMAL 99tl0_s4n: SPILL_1_GENERIC_ETRAP
97tl0_s5n: SPILL_5_NORMAL 100tl0_s5n: SPILL_1_GENERIC_ETRAP_FIXUP
98tl0_s6n: SPILL_6_NORMAL 101tl0_s6n: SPILL_2_GENERIC_ETRAP
99tl0_s7n: SPILL_7_NORMAL 102tl0_s7n: SPILL_2_GENERIC_ETRAP_FIXUP
100tl0_s0o: SPILL_0_OTHER 103tl0_s0o: SPILL_0_OTHER
101tl0_s1o: SPILL_1_OTHER 104tl0_s1o: SPILL_1_OTHER
102tl0_s2o: SPILL_2_OTHER 105tl0_s2o: SPILL_2_OTHER
@@ -110,9 +113,9 @@ tl0_f1n: FILL_1_NORMAL
110tl0_f2n: FILL_2_NORMAL 113tl0_f2n: FILL_2_NORMAL
111tl0_f3n: FILL_3_NORMAL 114tl0_f3n: FILL_3_NORMAL
112tl0_f4n: FILL_4_NORMAL 115tl0_f4n: FILL_4_NORMAL
113tl0_f5n: FILL_5_NORMAL 116tl0_f5n: FILL_0_NORMAL_RTRAP
114tl0_f6n: FILL_6_NORMAL 117tl0_f6n: FILL_1_GENERIC_RTRAP
115tl0_f7n: FILL_7_NORMAL 118tl0_f7n: FILL_2_GENERIC_RTRAP
116tl0_f0o: FILL_0_OTHER 119tl0_f0o: FILL_0_OTHER
117tl0_f1o: FILL_1_OTHER 120tl0_f1o: FILL_1_OTHER
118tl0_f2o: FILL_2_OTHER 121tl0_f2o: FILL_2_OTHER
@@ -128,7 +131,7 @@ tl0_flushw: FLUSH_WINDOW_TRAP
128tl0_resv104: BTRAP(0x104) BTRAP(0x105) BTRAP(0x106) BTRAP(0x107) 131tl0_resv104: BTRAP(0x104) BTRAP(0x105) BTRAP(0x106) BTRAP(0x107)
129 .globl tl0_solaris 132 .globl tl0_solaris
130tl0_solaris: SOLARIS_SYSCALL_TRAP 133tl0_solaris: SOLARIS_SYSCALL_TRAP
131tl0_netbsd: NETBSD_SYSCALL_TRAP 134tl0_resv109: BTRAP(0x109)
132tl0_resv10a: BTRAP(0x10a) BTRAP(0x10b) BTRAP(0x10c) BTRAP(0x10d) BTRAP(0x10e) 135tl0_resv10a: BTRAP(0x10a) BTRAP(0x10b) BTRAP(0x10c) BTRAP(0x10d) BTRAP(0x10e)
133tl0_resv10f: BTRAP(0x10f) 136tl0_resv10f: BTRAP(0x10f)
134tl0_linux32: LINUX_32BIT_SYSCALL_TRAP 137tl0_linux32: LINUX_32BIT_SYSCALL_TRAP
@@ -179,7 +182,7 @@ sparc64_ttable_tl1:
179tl1_resv000: BOOT_KERNEL BTRAPTL1(0x1) BTRAPTL1(0x2) BTRAPTL1(0x3) 182tl1_resv000: BOOT_KERNEL BTRAPTL1(0x1) BTRAPTL1(0x2) BTRAPTL1(0x3)
180tl1_resv004: BTRAPTL1(0x4) BTRAPTL1(0x5) BTRAPTL1(0x6) BTRAPTL1(0x7) 183tl1_resv004: BTRAPTL1(0x4) BTRAPTL1(0x5) BTRAPTL1(0x6) BTRAPTL1(0x7)
181tl1_iax: TRAP_NOSAVE(__spitfire_insn_access_exception_tl1) 184tl1_iax: TRAP_NOSAVE(__spitfire_insn_access_exception_tl1)
182tl1_resv009: BTRAPTL1(0x9) 185tl1_itsb_4v: SUN4V_ITSB_MISS
183tl1_iae: membar #Sync 186tl1_iae: membar #Sync
184 TRAP_NOSAVE_7INSNS(__spitfire_access_error) 187 TRAP_NOSAVE_7INSNS(__spitfire_access_error)
185tl1_resv00b: BTRAPTL1(0xb) BTRAPTL1(0xc) BTRAPTL1(0xd) BTRAPTL1(0xe) BTRAPTL1(0xf) 188tl1_resv00b: BTRAPTL1(0xb) BTRAPTL1(0xc) BTRAPTL1(0xd) BTRAPTL1(0xe) BTRAPTL1(0xf)
@@ -198,7 +201,7 @@ tl1_div0: TRAPTL1(do_div0_tl1)
198tl1_resv029: BTRAPTL1(0x29) BTRAPTL1(0x2a) BTRAPTL1(0x2b) BTRAPTL1(0x2c) 201tl1_resv029: BTRAPTL1(0x29) BTRAPTL1(0x2a) BTRAPTL1(0x2b) BTRAPTL1(0x2c)
199tl1_resv02d: BTRAPTL1(0x2d) BTRAPTL1(0x2e) BTRAPTL1(0x2f) 202tl1_resv02d: BTRAPTL1(0x2d) BTRAPTL1(0x2e) BTRAPTL1(0x2f)
200tl1_dax: TRAP_NOSAVE(__spitfire_data_access_exception_tl1) 203tl1_dax: TRAP_NOSAVE(__spitfire_data_access_exception_tl1)
201tl1_resv031: BTRAPTL1(0x31) 204tl1_dtsb_4v: SUN4V_DTSB_MISS
202tl1_dae: membar #Sync 205tl1_dae: membar #Sync
203 TRAP_NOSAVE_7INSNS(__spitfire_access_error) 206 TRAP_NOSAVE_7INSNS(__spitfire_access_error)
204tl1_resv033: BTRAPTL1(0x33) 207tl1_resv033: BTRAPTL1(0x33)
@@ -222,26 +225,10 @@ tl1_resv05c: BTRAPTL1(0x5c) BTRAPTL1(0x5d) BTRAPTL1(0x5e) BTRAPTL1(0x5f)
222tl1_ivec: TRAP_IVEC 225tl1_ivec: TRAP_IVEC
223tl1_paw: TRAPTL1(do_paw_tl1) 226tl1_paw: TRAPTL1(do_paw_tl1)
224tl1_vaw: TRAPTL1(do_vaw_tl1) 227tl1_vaw: TRAPTL1(do_vaw_tl1)
225 228tl1_cee: BTRAPTL1(0x63)
226 /* The grotty trick to save %g1 into current->thread.cee_stuff
227 * is because when we take this trap we could be interrupting
228 * trap code already using the trap alternate global registers.
229 *
230 * We cross our fingers and pray that this store/load does
231 * not cause yet another CEE trap.
232 */
233tl1_cee: membar #Sync
234 stx %g1, [%g6 + TI_CEE_STUFF]
235 ldxa [%g0] ASI_AFSR, %g1
236 membar #Sync
237 stxa %g1, [%g0] ASI_AFSR
238 membar #Sync
239 ldx [%g6 + TI_CEE_STUFF], %g1
240 retry
241
242tl1_iamiss: BTRAPTL1(0x64) BTRAPTL1(0x65) BTRAPTL1(0x66) BTRAPTL1(0x67) 229tl1_iamiss: BTRAPTL1(0x64) BTRAPTL1(0x65) BTRAPTL1(0x66) BTRAPTL1(0x67)
243tl1_damiss: 230tl1_damiss:
244#include "dtlb_backend.S" 231#include "dtlb_miss.S"
245tl1_daprot: 232tl1_daprot:
246#include "dtlb_prot.S" 233#include "dtlb_prot.S"
247tl1_fecc: BTRAPTL1(0x70) /* Fast-ECC on Cheetah */ 234tl1_fecc: BTRAPTL1(0x70) /* Fast-ECC on Cheetah */
diff --git a/arch/sparc64/kernel/unaligned.c b/arch/sparc64/kernel/unaligned.c
index 70faf630603b..001e8518331f 100644
--- a/arch/sparc64/kernel/unaligned.c
+++ b/arch/sparc64/kernel/unaligned.c
@@ -277,7 +277,7 @@ static void kernel_mna_trap_fault(void)
277 regs->tstate |= (ASI_AIUS << 24UL); 277 regs->tstate |= (ASI_AIUS << 24UL);
278} 278}
279 279
280asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, unsigned long sfar, unsigned long sfsr) 280asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
281{ 281{
282 enum direction dir = decode_direction(insn); 282 enum direction dir = decode_direction(insn);
283 int size = decode_access_size(insn); 283 int size = decode_access_size(insn);
@@ -405,6 +405,9 @@ extern void do_privact(struct pt_regs *regs);
405extern void spitfire_data_access_exception(struct pt_regs *regs, 405extern void spitfire_data_access_exception(struct pt_regs *regs,
406 unsigned long sfsr, 406 unsigned long sfsr,
407 unsigned long sfar); 407 unsigned long sfar);
408extern void sun4v_data_access_exception(struct pt_regs *regs,
409 unsigned long addr,
410 unsigned long type_ctx);
408 411
409int handle_ldf_stq(u32 insn, struct pt_regs *regs) 412int handle_ldf_stq(u32 insn, struct pt_regs *regs)
410{ 413{
@@ -447,14 +450,20 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
447 break; 450 break;
448 } 451 }
449 default: 452 default:
450 spitfire_data_access_exception(regs, 0, addr); 453 if (tlb_type == hypervisor)
454 sun4v_data_access_exception(regs, addr, 0);
455 else
456 spitfire_data_access_exception(regs, 0, addr);
451 return 1; 457 return 1;
452 } 458 }
453 if (put_user (first >> 32, (u32 __user *)addr) || 459 if (put_user (first >> 32, (u32 __user *)addr) ||
454 __put_user ((u32)first, (u32 __user *)(addr + 4)) || 460 __put_user ((u32)first, (u32 __user *)(addr + 4)) ||
455 __put_user (second >> 32, (u32 __user *)(addr + 8)) || 461 __put_user (second >> 32, (u32 __user *)(addr + 8)) ||
456 __put_user ((u32)second, (u32 __user *)(addr + 12))) { 462 __put_user ((u32)second, (u32 __user *)(addr + 12))) {
457 spitfire_data_access_exception(regs, 0, addr); 463 if (tlb_type == hypervisor)
464 sun4v_data_access_exception(regs, addr, 0);
465 else
466 spitfire_data_access_exception(regs, 0, addr);
458 return 1; 467 return 1;
459 } 468 }
460 } else { 469 } else {
@@ -467,7 +476,10 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
467 do_privact(regs); 476 do_privact(regs);
468 return 1; 477 return 1;
469 } else if (asi > ASI_SNFL) { 478 } else if (asi > ASI_SNFL) {
470 spitfire_data_access_exception(regs, 0, addr); 479 if (tlb_type == hypervisor)
480 sun4v_data_access_exception(regs, addr, 0);
481 else
482 spitfire_data_access_exception(regs, 0, addr);
471 return 1; 483 return 1;
472 } 484 }
473 switch (insn & 0x180000) { 485 switch (insn & 0x180000) {
@@ -484,7 +496,10 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
484 err |= __get_user (data[i], (u32 __user *)(addr + 4*i)); 496 err |= __get_user (data[i], (u32 __user *)(addr + 4*i));
485 } 497 }
486 if (err && !(asi & 0x2 /* NF */)) { 498 if (err && !(asi & 0x2 /* NF */)) {
487 spitfire_data_access_exception(regs, 0, addr); 499 if (tlb_type == hypervisor)
500 sun4v_data_access_exception(regs, addr, 0);
501 else
502 spitfire_data_access_exception(regs, 0, addr);
488 return 1; 503 return 1;
489 } 504 }
490 if (asi & 0x8) /* Little */ { 505 if (asi & 0x8) /* Little */ {
@@ -548,7 +563,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
548 u32 insn; 563 u32 insn;
549 u32 first, second; 564 u32 first, second;
550 u64 value; 565 u64 value;
551 u8 asi, freg; 566 u8 freg;
552 int flag; 567 int flag;
553 struct fpustate *f = FPUSTATE; 568 struct fpustate *f = FPUSTATE;
554 569
@@ -557,7 +572,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
557 if (test_thread_flag(TIF_32BIT)) 572 if (test_thread_flag(TIF_32BIT))
558 pc = (u32)pc; 573 pc = (u32)pc;
559 if (get_user(insn, (u32 __user *) pc) != -EFAULT) { 574 if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
560 asi = sfsr >> 16; 575 int asi = decode_asi(insn, regs);
561 if ((asi > ASI_SNFL) || 576 if ((asi > ASI_SNFL) ||
562 (asi < ASI_P)) 577 (asi < ASI_P))
563 goto daex; 578 goto daex;
@@ -587,7 +602,11 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
587 *(u64 *)(f->regs + freg) = value; 602 *(u64 *)(f->regs + freg) = value;
588 current_thread_info()->fpsaved[0] |= flag; 603 current_thread_info()->fpsaved[0] |= flag;
589 } else { 604 } else {
590daex: spitfire_data_access_exception(regs, sfsr, sfar); 605daex:
606 if (tlb_type == hypervisor)
607 sun4v_data_access_exception(regs, sfar, sfsr);
608 else
609 spitfire_data_access_exception(regs, sfsr, sfar);
591 return; 610 return;
592 } 611 }
593 advance(regs); 612 advance(regs);
@@ -600,7 +619,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
600 unsigned long tstate = regs->tstate; 619 unsigned long tstate = regs->tstate;
601 u32 insn; 620 u32 insn;
602 u64 value; 621 u64 value;
603 u8 asi, freg; 622 u8 freg;
604 int flag; 623 int flag;
605 struct fpustate *f = FPUSTATE; 624 struct fpustate *f = FPUSTATE;
606 625
@@ -609,8 +628,8 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
609 if (test_thread_flag(TIF_32BIT)) 628 if (test_thread_flag(TIF_32BIT))
610 pc = (u32)pc; 629 pc = (u32)pc;
611 if (get_user(insn, (u32 __user *) pc) != -EFAULT) { 630 if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
631 int asi = decode_asi(insn, regs);
612 freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20); 632 freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
613 asi = sfsr >> 16;
614 value = 0; 633 value = 0;
615 flag = (freg < 32) ? FPRS_DL : FPRS_DU; 634 flag = (freg < 32) ? FPRS_DL : FPRS_DU;
616 if ((asi > ASI_SNFL) || 635 if ((asi > ASI_SNFL) ||
@@ -631,7 +650,11 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
631 __put_user ((u32)value, (u32 __user *)(sfar + 4))) 650 __put_user ((u32)value, (u32 __user *)(sfar + 4)))
632 goto daex; 651 goto daex;
633 } else { 652 } else {
634daex: spitfire_data_access_exception(regs, sfsr, sfar); 653daex:
654 if (tlb_type == hypervisor)
655 sun4v_data_access_exception(regs, sfar, sfsr);
656 else
657 spitfire_data_access_exception(regs, sfsr, sfar);
635 return; 658 return;
636 } 659 }
637 advance(regs); 660 advance(regs);
diff --git a/arch/sparc64/kernel/us2e_cpufreq.c b/arch/sparc64/kernel/us2e_cpufreq.c
index b35dc8dc995a..1f83fe6a82d6 100644
--- a/arch/sparc64/kernel/us2e_cpufreq.c
+++ b/arch/sparc64/kernel/us2e_cpufreq.c
@@ -346,6 +346,9 @@ static int __init us2e_freq_init(void)
346 unsigned long manuf, impl, ver; 346 unsigned long manuf, impl, ver;
347 int ret; 347 int ret;
348 348
349 if (tlb_type != spitfire)
350 return -ENODEV;
351
349 __asm__("rdpr %%ver, %0" : "=r" (ver)); 352 __asm__("rdpr %%ver, %0" : "=r" (ver));
350 manuf = ((ver >> 48) & 0xffff); 353 manuf = ((ver >> 48) & 0xffff);
351 impl = ((ver >> 32) & 0xffff); 354 impl = ((ver >> 32) & 0xffff);
@@ -354,20 +357,16 @@ static int __init us2e_freq_init(void)
354 struct cpufreq_driver *driver; 357 struct cpufreq_driver *driver;
355 358
356 ret = -ENOMEM; 359 ret = -ENOMEM;
357 driver = kmalloc(sizeof(struct cpufreq_driver), GFP_KERNEL); 360 driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
358 if (!driver) 361 if (!driver)
359 goto err_out; 362 goto err_out;
360 memset(driver, 0, sizeof(*driver));
361 363
362 us2e_freq_table = kmalloc( 364 us2e_freq_table = kzalloc(
363 (NR_CPUS * sizeof(struct us2e_freq_percpu_info)), 365 (NR_CPUS * sizeof(struct us2e_freq_percpu_info)),
364 GFP_KERNEL); 366 GFP_KERNEL);
365 if (!us2e_freq_table) 367 if (!us2e_freq_table)
366 goto err_out; 368 goto err_out;
367 369
368 memset(us2e_freq_table, 0,
369 (NR_CPUS * sizeof(struct us2e_freq_percpu_info)));
370
371 driver->init = us2e_freq_cpu_init; 370 driver->init = us2e_freq_cpu_init;
372 driver->verify = us2e_freq_verify; 371 driver->verify = us2e_freq_verify;
373 driver->target = us2e_freq_target; 372 driver->target = us2e_freq_target;
diff --git a/arch/sparc64/kernel/us3_cpufreq.c b/arch/sparc64/kernel/us3_cpufreq.c
index 6d1f9a3c464f..47e3acafb5be 100644
--- a/arch/sparc64/kernel/us3_cpufreq.c
+++ b/arch/sparc64/kernel/us3_cpufreq.c
@@ -203,6 +203,9 @@ static int __init us3_freq_init(void)
203 unsigned long manuf, impl, ver; 203 unsigned long manuf, impl, ver;
204 int ret; 204 int ret;
205 205
206 if (tlb_type != cheetah && tlb_type != cheetah_plus)
207 return -ENODEV;
208
206 __asm__("rdpr %%ver, %0" : "=r" (ver)); 209 __asm__("rdpr %%ver, %0" : "=r" (ver));
207 manuf = ((ver >> 48) & 0xffff); 210 manuf = ((ver >> 48) & 0xffff);
208 impl = ((ver >> 32) & 0xffff); 211 impl = ((ver >> 32) & 0xffff);
@@ -215,20 +218,16 @@ static int __init us3_freq_init(void)
215 struct cpufreq_driver *driver; 218 struct cpufreq_driver *driver;
216 219
217 ret = -ENOMEM; 220 ret = -ENOMEM;
218 driver = kmalloc(sizeof(struct cpufreq_driver), GFP_KERNEL); 221 driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
219 if (!driver) 222 if (!driver)
220 goto err_out; 223 goto err_out;
221 memset(driver, 0, sizeof(*driver));
222 224
223 us3_freq_table = kmalloc( 225 us3_freq_table = kzalloc(
224 (NR_CPUS * sizeof(struct us3_freq_percpu_info)), 226 (NR_CPUS * sizeof(struct us3_freq_percpu_info)),
225 GFP_KERNEL); 227 GFP_KERNEL);
226 if (!us3_freq_table) 228 if (!us3_freq_table)
227 goto err_out; 229 goto err_out;
228 230
229 memset(us3_freq_table, 0,
230 (NR_CPUS * sizeof(struct us3_freq_percpu_info)));
231
232 driver->init = us3_freq_cpu_init; 231 driver->init = us3_freq_cpu_init;
233 driver->verify = us3_freq_verify; 232 driver->verify = us3_freq_verify;
234 driver->target = us3_freq_target; 233 driver->target = us3_freq_target;
diff --git a/arch/sparc64/kernel/visemul.c b/arch/sparc64/kernel/visemul.c
new file mode 100644
index 000000000000..84fedaa38aae
--- /dev/null
+++ b/arch/sparc64/kernel/visemul.c
@@ -0,0 +1,894 @@
1/* visemul.c: Emulation of VIS instructions.
2 *
3 * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
4 */
5#include <linux/kernel.h>
6#include <linux/errno.h>
7#include <linux/thread_info.h>
8
9#include <asm/ptrace.h>
10#include <asm/pstate.h>
11#include <asm/system.h>
12#include <asm/fpumacro.h>
13#include <asm/uaccess.h>
14
15/* OPF field of various VIS instructions. */
16
17/* 000111011 - four 16-bit packs */
18#define FPACK16_OPF 0x03b
19
20/* 000111010 - two 32-bit packs */
21#define FPACK32_OPF 0x03a
22
 23/* 000111101 - two 16-bit packs from 32-bit fixed values */
24#define FPACKFIX_OPF 0x03d
25
26/* 001001101 - four 16-bit expands */
27#define FEXPAND_OPF 0x04d
28
29/* 001001011 - two 32-bit merges */
30#define FPMERGE_OPF 0x04b
31
 32/* 000110001 - 8-by-16-bit partitioned product */
33#define FMUL8x16_OPF 0x031
34
35/* 000110011 - 8-by-16-bit upper alpha partitioned product */
36#define FMUL8x16AU_OPF 0x033
37
38/* 000110101 - 8-by-16-bit lower alpha partitioned product */
39#define FMUL8x16AL_OPF 0x035
40
41/* 000110110 - upper 8-by-16-bit partitioned product */
42#define FMUL8SUx16_OPF 0x036
43
44/* 000110111 - lower 8-by-16-bit partitioned product */
45#define FMUL8ULx16_OPF 0x037
46
47/* 000111000 - upper 8-by-16-bit partitioned product */
48#define FMULD8SUx16_OPF 0x038
49
50/* 000111001 - lower unsigned 8-by-16-bit partitioned product */
51#define FMULD8ULx16_OPF 0x039
52
53/* 000101000 - four 16-bit compare; set rd if src1 > src2 */
54#define FCMPGT16_OPF 0x028
55
56/* 000101100 - two 32-bit compare; set rd if src1 > src2 */
57#define FCMPGT32_OPF 0x02c
58
59/* 000100000 - four 16-bit compare; set rd if src1 <= src2 */
60#define FCMPLE16_OPF 0x020
61
62/* 000100100 - two 32-bit compare; set rd if src1 <= src2 */
63#define FCMPLE32_OPF 0x024
64
65/* 000100010 - four 16-bit compare; set rd if src1 != src2 */
66#define FCMPNE16_OPF 0x022
67
68/* 000100110 - two 32-bit compare; set rd if src1 != src2 */
69#define FCMPNE32_OPF 0x026
70
71/* 000101010 - four 16-bit compare; set rd if src1 == src2 */
72#define FCMPEQ16_OPF 0x02a
73
74/* 000101110 - two 32-bit compare; set rd if src1 == src2 */
75#define FCMPEQ32_OPF 0x02e
76
77/* 000000000 - Eight 8-bit edge boundary processing */
78#define EDGE8_OPF 0x000
79
80/* 000000001 - Eight 8-bit edge boundary processing, no CC */
81#define EDGE8N_OPF 0x001
82
83/* 000000010 - Eight 8-bit edge boundary processing, little-endian */
84#define EDGE8L_OPF 0x002
85
86/* 000000011 - Eight 8-bit edge boundary processing, little-endian, no CC */
87#define EDGE8LN_OPF 0x003
88
89/* 000000100 - Four 16-bit edge boundary processing */
90#define EDGE16_OPF 0x004
91
92/* 000000101 - Four 16-bit edge boundary processing, no CC */
93#define EDGE16N_OPF 0x005
94
95/* 000000110 - Four 16-bit edge boundary processing, little-endian */
96#define EDGE16L_OPF 0x006
97
98/* 000000111 - Four 16-bit edge boundary processing, little-endian, no CC */
99#define EDGE16LN_OPF 0x007
100
101/* 000001000 - Two 32-bit edge boundary processing */
102#define EDGE32_OPF 0x008
103
104/* 000001001 - Two 32-bit edge boundary processing, no CC */
105#define EDGE32N_OPF 0x009
106
107/* 000001010 - Two 32-bit edge boundary processing, little-endian */
108#define EDGE32L_OPF 0x00a
109
110/* 000001011 - Two 32-bit edge boundary processing, little-endian, no CC */
111#define EDGE32LN_OPF 0x00b
112
113/* 000111110 - distance between 8 8-bit components */
114#define PDIST_OPF 0x03e
115
116/* 000010000 - convert 8-bit 3-D address to blocked byte address */
117#define ARRAY8_OPF 0x010
118
119/* 000010010 - convert 16-bit 3-D address to blocked byte address */
120#define ARRAY16_OPF 0x012
121
122/* 000010100 - convert 32-bit 3-D address to blocked byte address */
123#define ARRAY32_OPF 0x014
124
125/* 000011001 - Set the GSR.MASK field in preparation for a BSHUFFLE */
126#define BMASK_OPF 0x019
127
128/* 001001100 - Permute bytes as specified by GSR.MASK */
129#define BSHUFFLE_OPF 0x04c
130
131#define VIS_OPCODE_MASK ((0x3 << 30) | (0x3f << 19))
132#define VIS_OPCODE_VAL ((0x2 << 30) | (0x36 << 19))
133
134#define VIS_OPF_SHIFT 5
135#define VIS_OPF_MASK (0x1ff << VIS_OPF_SHIFT)
136
137#define RS1(INSN) (((INSN) >> 24) & 0x1f)
138#define RS2(INSN) (((INSN) >> 0) & 0x1f)
139#define RD(INSN) (((INSN) >> 25) & 0x1f)
140
141static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
142 unsigned int rd, int from_kernel)
143{
144 if (rs2 >= 16 || rs1 >= 16 || rd >= 16) {
145 if (from_kernel != 0)
146 __asm__ __volatile__("flushw");
147 else
148 flushw_user();
149 }
150}
151
152static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
153{
154 unsigned long value;
155
156 if (reg < 16)
157 return (!reg ? 0 : regs->u_regs[reg]);
158 if (regs->tstate & TSTATE_PRIV) {
159 struct reg_window *win;
160 win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
161 value = win->locals[reg - 16];
162 } else if (test_thread_flag(TIF_32BIT)) {
163 struct reg_window32 __user *win32;
164 win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
165 get_user(value, &win32->locals[reg - 16]);
166 } else {
167 struct reg_window __user *win;
168 win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
169 get_user(value, &win->locals[reg - 16]);
170 }
171 return value;
172}
173
174static inline unsigned long __user *__fetch_reg_addr_user(unsigned int reg,
175 struct pt_regs *regs)
176{
177 BUG_ON(reg < 16);
178 BUG_ON(regs->tstate & TSTATE_PRIV);
179
180 if (test_thread_flag(TIF_32BIT)) {
181 struct reg_window32 __user *win32;
182 win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
183 return (unsigned long __user *)&win32->locals[reg - 16];
184 } else {
185 struct reg_window __user *win;
186 win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
187 return &win->locals[reg - 16];
188 }
189}
190
191static inline unsigned long *__fetch_reg_addr_kern(unsigned int reg,
192 struct pt_regs *regs)
193{
194 BUG_ON(reg >= 16);
195 BUG_ON(regs->tstate & TSTATE_PRIV);
196
197 return &regs->u_regs[reg];
198}
199
200static void store_reg(struct pt_regs *regs, unsigned long val, unsigned long rd)
201{
202 if (rd < 16) {
203 unsigned long *rd_kern = __fetch_reg_addr_kern(rd, regs);
204
205 *rd_kern = val;
206 } else {
207 unsigned long __user *rd_user = __fetch_reg_addr_user(rd, regs);
208
209 if (test_thread_flag(TIF_32BIT))
210 __put_user((u32)val, (u32 __user *)rd_user);
211 else
212 __put_user(val, rd_user);
213 }
214}
215
216static inline unsigned long fpd_regval(struct fpustate *f,
217 unsigned int insn_regnum)
218{
219 insn_regnum = (((insn_regnum & 1) << 5) |
220 (insn_regnum & 0x1e));
221
222 return *(unsigned long *) &f->regs[insn_regnum];
223}
224
225static inline unsigned long *fpd_regaddr(struct fpustate *f,
226 unsigned int insn_regnum)
227{
228 insn_regnum = (((insn_regnum & 1) << 5) |
229 (insn_regnum & 0x1e));
230
231 return (unsigned long *) &f->regs[insn_regnum];
232}
233
234static inline unsigned int fps_regval(struct fpustate *f,
235 unsigned int insn_regnum)
236{
237 return f->regs[insn_regnum];
238}
239
240static inline unsigned int *fps_regaddr(struct fpustate *f,
241 unsigned int insn_regnum)
242{
243 return &f->regs[insn_regnum];
244}
245
246struct edge_tab {
247 u16 left, right;
248};
249struct edge_tab edge8_tab[8] = {
250 { 0xff, 0x80 },
251 { 0x7f, 0xc0 },
252 { 0x3f, 0xe0 },
253 { 0x1f, 0xf0 },
254 { 0x0f, 0xf8 },
255 { 0x07, 0xfc },
256 { 0x03, 0xfe },
257 { 0x01, 0xff },
258};
259struct edge_tab edge8_tab_l[8] = {
260 { 0xff, 0x01 },
261 { 0xfe, 0x03 },
262 { 0xfc, 0x07 },
263 { 0xf8, 0x0f },
264 { 0xf0, 0x1f },
265 { 0xe0, 0x3f },
266 { 0xc0, 0x7f },
267 { 0x80, 0xff },
268};
269struct edge_tab edge16_tab[4] = {
270 { 0xf, 0x8 },
271 { 0x7, 0xc },
272 { 0x3, 0xe },
273 { 0x1, 0xf },
274};
275struct edge_tab edge16_tab_l[4] = {
276 { 0xf, 0x1 },
277 { 0xe, 0x3 },
278 { 0xc, 0x7 },
279 { 0x8, 0xf },
280};
281struct edge_tab edge32_tab[2] = {
282 { 0x3, 0x2 },
283 { 0x1, 0x3 },
284};
285struct edge_tab edge32_tab_l[2] = {
286 { 0x3, 0x1 },
287 { 0x2, 0x3 },
288};
289
290static void edge(struct pt_regs *regs, unsigned int insn, unsigned int opf)
291{
292 unsigned long orig_rs1, rs1, orig_rs2, rs2, rd_val;
293 u16 left, right;
294
295 maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0);
296 orig_rs1 = rs1 = fetch_reg(RS1(insn), regs);
297 orig_rs2 = rs2 = fetch_reg(RS2(insn), regs);
298
299 if (test_thread_flag(TIF_32BIT)) {
300 rs1 = rs1 & 0xffffffff;
301 rs2 = rs2 & 0xffffffff;
302 }
303 switch (opf) {
304 default:
305 case EDGE8_OPF:
306 case EDGE8N_OPF:
307 left = edge8_tab[rs1 & 0x7].left;
308 right = edge8_tab[rs2 & 0x7].right;
309 break;
310 case EDGE8L_OPF:
311 case EDGE8LN_OPF:
312 left = edge8_tab_l[rs1 & 0x7].left;
313 right = edge8_tab_l[rs2 & 0x7].right;
314 break;
315
316 case EDGE16_OPF:
317 case EDGE16N_OPF:
318 left = edge16_tab[(rs1 >> 1) & 0x3].left;
319 right = edge16_tab[(rs2 >> 1) & 0x3].right;
320 break;
321
322 case EDGE16L_OPF:
323 case EDGE16LN_OPF:
324 left = edge16_tab_l[(rs1 >> 1) & 0x3].left;
325 right = edge16_tab_l[(rs2 >> 1) & 0x3].right;
326 break;
327
328 case EDGE32_OPF:
329 case EDGE32N_OPF:
330 left = edge32_tab[(rs1 >> 2) & 0x1].left;
331 right = edge32_tab[(rs2 >> 2) & 0x1].right;
332 break;
333
334 case EDGE32L_OPF:
335 case EDGE32LN_OPF:
336 left = edge32_tab_l[(rs1 >> 2) & 0x1].left;
337 right = edge32_tab_l[(rs2 >> 2) & 0x1].right;
338 break;
339 };
340
341 if ((rs1 & ~0x7UL) == (rs2 & ~0x7UL))
342 rd_val = right & left;
343 else
344 rd_val = left;
345
346 store_reg(regs, rd_val, RD(insn));
347
348 switch (opf) {
349 case EDGE8_OPF:
350 case EDGE8L_OPF:
351 case EDGE16_OPF:
352 case EDGE16L_OPF:
353 case EDGE32_OPF:
354 case EDGE32L_OPF: {
355 unsigned long ccr, tstate;
356
357 __asm__ __volatile__("subcc %1, %2, %%g0\n\t"
358 "rd %%ccr, %0"
359 : "=r" (ccr)
360 : "r" (orig_rs1), "r" (orig_rs2)
361 : "cc");
362 tstate = regs->tstate & ~(TSTATE_XCC | TSTATE_ICC);
363 regs->tstate = tstate | (ccr << 32UL);
364 }
365 };
366}
367
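
A concrete EDGE8 case, traced with the same tables as edge() above (under the numbering where the mask's most significant bit selects byte 0):

	#include <stdio.h>

	/* left/right columns of edge8_tab above. */
	static const unsigned short left8[8]  = { 0xff, 0x7f, 0x3f, 0x1f,
						  0x0f, 0x07, 0x03, 0x01 };
	static const unsigned short right8[8] = { 0x80, 0xc0, 0xe0, 0xf0,
						  0xf8, 0xfc, 0xfe, 0xff };

	int main(void)
	{
		unsigned long rs1 = 0x1003;	/* start address */
		unsigned long rs2 = 0x1006;	/* end address, same 8-byte block */
		unsigned int left  = left8[rs1 & 0x7];	/* 0x1f */
		unsigned int right = right8[rs2 & 0x7];	/* 0xfe */
		unsigned int rd;

		/* Same-block case: intersect the two masks. */
		rd = ((rs1 & ~0x7UL) == (rs2 & ~0x7UL)) ? (left & right) : left;
		printf("edge8 mask = 0x%02x\n", rd);	/* 0x1e: bytes 3..6 */
		return 0;
	}
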
368static void array(struct pt_regs *regs, unsigned int insn, unsigned int opf)
369{
370 unsigned long rs1, rs2, rd_val;
371 unsigned int bits, bits_mask;
372
373 maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0);
374 rs1 = fetch_reg(RS1(insn), regs);
375 rs2 = fetch_reg(RS2(insn), regs);
376
377 bits = (rs2 > 5 ? 5 : rs2);
378 bits_mask = (1UL << bits) - 1UL;
379
380 rd_val = ((((rs1 >> 11) & 0x3) << 0) |
381 (((rs1 >> 33) & 0x3) << 2) |
382 (((rs1 >> 55) & 0x1) << 4) |
383 (((rs1 >> 13) & 0xf) << 5) |
384 (((rs1 >> 35) & 0xf) << 9) |
385 (((rs1 >> 56) & 0xf) << 13) |
386 (((rs1 >> 17) & bits_mask) << 17) |
387 (((rs1 >> 39) & bits_mask) << (17 + bits)) |
388 (((rs1 >> 60) & 0xf) << (17 + (2*bits))));
389
390 switch (opf) {
391 case ARRAY16_OPF:
392 rd_val <<= 1;
393 break;
394
395 case ARRAY32_OPF:
396 rd_val <<= 2;
397 };
398
399 store_reg(regs, rd_val, RD(insn));
400}
401
402static void bmask(struct pt_regs *regs, unsigned int insn)
403{
404 unsigned long rs1, rs2, rd_val, gsr;
405
406 maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0);
407 rs1 = fetch_reg(RS1(insn), regs);
408 rs2 = fetch_reg(RS2(insn), regs);
409 rd_val = rs1 + rs2;
410
411 store_reg(regs, rd_val, RD(insn));
412
413 gsr = current_thread_info()->gsr[0] & 0xffffffff;
414 gsr |= rd_val << 32UL;
415 current_thread_info()->gsr[0] = gsr;
416}
417
418static void bshuffle(struct pt_regs *regs, unsigned int insn)
419{
420 struct fpustate *f = FPUSTATE;
421 unsigned long rs1, rs2, rd_val;
422 unsigned long bmask, i;
423
424 bmask = current_thread_info()->gsr[0] >> 32UL;
425
426 rs1 = fpd_regval(f, RS1(insn));
427 rs2 = fpd_regval(f, RS2(insn));
428
429 rd_val = 0UL;
430 for (i = 0; i < 8; i++) {
431 unsigned long which = (bmask >> (i * 4)) & 0xf;
432 unsigned long byte;
433
434 if (which < 8)
435 byte = (rs1 >> (which * 8)) & 0xff;
436 else
437 byte = (rs2 >> ((which-8)*8)) & 0xff;
438 rd_val |= (byte << (i * 8));
439 }
440
441 *fpd_regaddr(f, RD(insn)) = rd_val;
442}
443
444static void pdist(struct pt_regs *regs, unsigned int insn)
445{
446 struct fpustate *f = FPUSTATE;
447 unsigned long rs1, rs2, *rd, rd_val;
448 unsigned long i;
449
450 rs1 = fpd_regval(f, RS1(insn));
 451 rs2 = fpd_regval(f, RS2(insn));
452 rd = fpd_regaddr(f, RD(insn));
453
454 rd_val = *rd;
455
456 for (i = 0; i < 8; i++) {
457 s16 s1, s2;
458
459 s1 = (rs1 >> (56 - (i * 8))) & 0xff;
460 s2 = (rs2 >> (56 - (i * 8))) & 0xff;
461
462 /* Absolute value of difference. */
463 s1 -= s2;
464 if (s1 < 0)
465 s1 = ~s1 + 1;
466
467 rd_val += s1;
468 }
469
470 *rd = rd_val;
471}
472
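
pdist() is a sum of absolute differences over the eight bytes of two 64-bit FP registers, accumulated on top of whatever is already in rd; a minimal standalone equivalent of the loop:

	/* Per-call accumulation done by pdist() above. */
	static unsigned long pdist_sketch(unsigned long rs1, unsigned long rs2,
					  unsigned long rd_accum)
	{
		int i;

		for (i = 0; i < 8; i++) {
			int a = (rs1 >> (56 - i * 8)) & 0xff;
			int b = (rs2 >> (56 - i * 8)) & 0xff;

			rd_accum += (a > b) ? (a - b) : (b - a);
		}
		return rd_accum;
	}
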
473static void pformat(struct pt_regs *regs, unsigned int insn, unsigned int opf)
474{
475 struct fpustate *f = FPUSTATE;
476 unsigned long rs1, rs2, gsr, scale, rd_val;
477
478 gsr = current_thread_info()->gsr[0];
479 scale = (gsr >> 3) & (opf == FPACK16_OPF ? 0xf : 0x1f);
480 switch (opf) {
481 case FPACK16_OPF: {
482 unsigned long byte;
483
484 rs2 = fpd_regval(f, RS2(insn));
485 rd_val = 0;
486 for (byte = 0; byte < 4; byte++) {
487 unsigned int val;
488 s16 src = (rs2 >> (byte * 16UL)) & 0xffffUL;
489 int scaled = src << scale;
490 int from_fixed = scaled >> 7;
491
492 val = ((from_fixed < 0) ?
493 0 :
494 (from_fixed > 255) ?
495 255 : from_fixed);
496
497 rd_val |= (val << (8 * byte));
498 }
499 *fps_regaddr(f, RD(insn)) = rd_val;
500 break;
501 }
502
503 case FPACK32_OPF: {
504 unsigned long word;
505
506 rs1 = fpd_regval(f, RS1(insn));
507 rs2 = fpd_regval(f, RS2(insn));
508 rd_val = (rs1 << 8) & ~(0x000000ff000000ffUL);
509 for (word = 0; word < 2; word++) {
510 unsigned long val;
511 s32 src = (rs2 >> (word * 32UL));
512 s64 scaled = src << scale;
513 s64 from_fixed = scaled >> 23;
514
515 val = ((from_fixed < 0) ?
516 0 :
517 (from_fixed > 255) ?
518 255 : from_fixed);
519
520 rd_val |= (val << (32 * word));
521 }
522 *fpd_regaddr(f, RD(insn)) = rd_val;
523 break;
524 }
525
526 case FPACKFIX_OPF: {
527 unsigned long word;
528
529 rs2 = fpd_regval(f, RS2(insn));
530
531 rd_val = 0;
532 for (word = 0; word < 2; word++) {
533 long val;
534 s32 src = (rs2 >> (word * 32UL));
535 s64 scaled = src << scale;
536 s64 from_fixed = scaled >> 16;
537
538 val = ((from_fixed < -32768) ?
539 -32768 :
540 (from_fixed > 32767) ?
541 32767 : from_fixed);
542
543 rd_val |= ((val & 0xffff) << (word * 16));
544 }
545 *fps_regaddr(f, RD(insn)) = rd_val;
546 break;
547 }
548
549 case FEXPAND_OPF: {
550 unsigned long byte;
551
552 rs2 = fps_regval(f, RS2(insn));
553
554 rd_val = 0;
555 for (byte = 0; byte < 4; byte++) {
556 unsigned long val;
557 u8 src = (rs2 >> (byte * 8)) & 0xff;
558
559 val = src << 4;
560
561 rd_val |= (val << (byte * 16));
562 }
563 *fpd_regaddr(f, RD(insn)) = rd_val;
564 break;
565 }
566
567 case FPMERGE_OPF: {
568 rs1 = fps_regval(f, RS1(insn));
569 rs2 = fps_regval(f, RS2(insn));
570
571 rd_val = (((rs2 & 0x000000ff) << 0) |
572 ((rs1 & 0x000000ff) << 8) |
573 ((rs2 & 0x0000ff00) << 8) |
574 ((rs1 & 0x0000ff00) << 16) |
575 ((rs2 & 0x00ff0000) << 16) |
576 ((rs1 & 0x00ff0000) << 24) |
577 ((rs2 & 0xff000000) << 24) |
578 ((rs1 & 0xff000000) << 32));
579 *fpd_regaddr(f, RD(insn)) = rd_val;
580 break;
581 }
582 };
583}
584
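
The FPACK16 arithmetic above, per lane: shift the signed 16-bit fixed-point source left by GSR.scale, drop the low 7 fraction bits, and clamp to an unsigned byte. One lane in isolation:

	/* One FPACK16 lane, matching the loop above. */
	static unsigned int fpack16_lane(short src, unsigned int scale)
	{
		int scaled = src << scale;
		int from_fixed = scaled >> 7;

		if (from_fixed < 0)
			return 0;
		if (from_fixed > 255)
			return 255;
		return from_fixed;
	}
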
585static void pmul(struct pt_regs *regs, unsigned int insn, unsigned int opf)
586{
587 struct fpustate *f = FPUSTATE;
588 unsigned long rs1, rs2, rd_val;
589
590 switch (opf) {
591 case FMUL8x16_OPF: {
592 unsigned long byte;
593
594 rs1 = fps_regval(f, RS1(insn));
595 rs2 = fpd_regval(f, RS2(insn));
596
597 rd_val = 0;
598 for (byte = 0; byte < 4; byte++) {
599 u16 src1 = (rs1 >> (byte * 8)) & 0x00ff;
600 s16 src2 = (rs2 >> (byte * 16)) & 0xffff;
601 u32 prod = src1 * src2;
602 u16 scaled = ((prod & 0x00ffff00) >> 8);
603
604 /* Round up. */
605 if (prod & 0x80)
606 scaled++;
607 rd_val |= ((scaled & 0xffffUL) << (byte * 16UL));
608 }
609
610 *fpd_regaddr(f, RD(insn)) = rd_val;
611 break;
612 }
613
614 case FMUL8x16AU_OPF:
615 case FMUL8x16AL_OPF: {
616 unsigned long byte;
617 s16 src2;
618
619 rs1 = fps_regval(f, RS1(insn));
620 rs2 = fps_regval(f, RS2(insn));
621
622 rd_val = 0;
 623 src2 = rs2 >> ((opf == FMUL8x16AU_OPF) ? 16 : 0);
624 for (byte = 0; byte < 4; byte++) {
625 u16 src1 = (rs1 >> (byte * 8)) & 0x00ff;
626 u32 prod = src1 * src2;
627 u16 scaled = ((prod & 0x00ffff00) >> 8);
628
629 /* Round up. */
630 if (prod & 0x80)
631 scaled++;
632 rd_val |= ((scaled & 0xffffUL) << (byte * 16UL));
633 }
634
635 *fpd_regaddr(f, RD(insn)) = rd_val;
636 break;
637 }
638
639 case FMUL8SUx16_OPF:
640 case FMUL8ULx16_OPF: {
641 unsigned long byte, ushift;
642
643 rs1 = fpd_regval(f, RS1(insn));
644 rs2 = fpd_regval(f, RS2(insn));
645
646 rd_val = 0;
647 ushift = (opf == FMUL8SUx16_OPF) ? 8 : 0;
648 for (byte = 0; byte < 4; byte++) {
649 u16 src1;
650 s16 src2;
651 u32 prod;
652 u16 scaled;
653
654 src1 = ((rs1 >> ((16 * byte) + ushift)) & 0x00ff);
655 src2 = ((rs2 >> (16 * byte)) & 0xffff);
656 prod = src1 * src2;
657 scaled = ((prod & 0x00ffff00) >> 8);
658
659 /* Round up. */
660 if (prod & 0x80)
661 scaled++;
662 rd_val |= ((scaled & 0xffffUL) << (byte * 16UL));
663 }
664
665 *fpd_regaddr(f, RD(insn)) = rd_val;
666 break;
667 }
668
669 case FMULD8SUx16_OPF:
670 case FMULD8ULx16_OPF: {
671 unsigned long byte, ushift;
672
673 rs1 = fps_regval(f, RS1(insn));
674 rs2 = fps_regval(f, RS2(insn));
675
676 rd_val = 0;
677 ushift = (opf == FMULD8SUx16_OPF) ? 8 : 0;
678 for (byte = 0; byte < 2; byte++) {
679 u16 src1;
680 s16 src2;
681 u32 prod;
682 u16 scaled;
683
684 src1 = ((rs1 >> ((16 * byte) + ushift)) & 0x00ff);
685 src2 = ((rs2 >> (16 * byte)) & 0xffff);
686 prod = src1 * src2;
687 scaled = ((prod & 0x00ffff00) >> 8);
688
689 /* Round up. */
690 if (prod & 0x80)
691 scaled++;
692 rd_val |= ((scaled & 0xffffUL) <<
693 ((byte * 32UL) + 7UL));
694 }
695 *fpd_regaddr(f, RD(insn)) = rd_val;
696 break;
697 }
698 };
699}
700
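
All the partitioned-multiply variants above share one per-lane step: multiply an unsigned 8-bit value by a signed 16-bit one, keep bits 23..8 of the product, and round up on the dropped bit 7 (only the placement of the result differs between them). One lane in isolation:

	/* One 8-by-16 multiply lane with the rounding used above. */
	static unsigned short pmul_lane(unsigned short src1 /* 0..255 */, short src2)
	{
		unsigned int prod = src1 * src2;	/* signed multiply, as above */
		unsigned short scaled = (prod & 0x00ffff00) >> 8;

		if (prod & 0x80)	/* round up on the dropped bit */
			scaled++;
		return scaled;
	}
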
701static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
702{
703 struct fpustate *f = FPUSTATE;
704 unsigned long rs1, rs2, rd_val, i;
705
706 rs1 = fpd_regval(f, RS1(insn));
707 rs2 = fpd_regval(f, RS2(insn));
708
709 rd_val = 0;
710
711 switch (opf) {
712 case FCMPGT16_OPF:
713 for (i = 0; i < 4; i++) {
714 s16 a = (rs1 >> (i * 16)) & 0xffff;
715 s16 b = (rs2 >> (i * 16)) & 0xffff;
716
717 if (a > b)
718 rd_val |= 1 << i;
719 }
720 break;
721
722 case FCMPGT32_OPF:
723 for (i = 0; i < 2; i++) {
 724 s32 a = (rs1 >> (i * 32)) & 0xffffffff;
 725 s32 b = (rs2 >> (i * 32)) & 0xffffffff;
726
727 if (a > b)
728 rd_val |= 1 << i;
729 }
730 break;
731
732 case FCMPLE16_OPF:
733 for (i = 0; i < 4; i++) {
734 s16 a = (rs1 >> (i * 16)) & 0xffff;
735 s16 b = (rs2 >> (i * 16)) & 0xffff;
736
737 if (a <= b)
738 rd_val |= 1 << i;
739 }
740 break;
741
742 case FCMPLE32_OPF:
743 for (i = 0; i < 2; i++) {
 744 s32 a = (rs1 >> (i * 32)) & 0xffffffff;
 745 s32 b = (rs2 >> (i * 32)) & 0xffffffff;
746
747 if (a <= b)
748 rd_val |= 1 << i;
749 }
750 break;
751
752 case FCMPNE16_OPF:
753 for (i = 0; i < 4; i++) {
754 s16 a = (rs1 >> (i * 16)) & 0xffff;
755 s16 b = (rs2 >> (i * 16)) & 0xffff;
756
757 if (a != b)
758 rd_val |= 1 << i;
759 }
760 break;
761
762 case FCMPNE32_OPF:
763 for (i = 0; i < 2; i++) {
 764 s32 a = (rs1 >> (i * 32)) & 0xffffffff;
 765 s32 b = (rs2 >> (i * 32)) & 0xffffffff;
766
767 if (a != b)
768 rd_val |= 1 << i;
769 }
770 break;
771
772 case FCMPEQ16_OPF:
773 for (i = 0; i < 4; i++) {
774 s16 a = (rs1 >> (i * 16)) & 0xffff;
775 s16 b = (rs2 >> (i * 16)) & 0xffff;
776
777 if (a == b)
778 rd_val |= 1 << i;
779 }
780 break;
781
782 case FCMPEQ32_OPF:
783 for (i = 0; i < 2; i++) {
 784 s32 a = (rs1 >> (i * 32)) & 0xffffffff;
 785 s32 b = (rs2 >> (i * 32)) & 0xffffffff;
786
787 if (a == b)
788 rd_val |= 1 << i;
789 }
790 break;
791 };
792
793 maybe_flush_windows(0, 0, RD(insn), 0);
794 store_reg(regs, rd_val, RD(insn));
795}
796
797/* Emulate the VIS instructions which are not implemented in
798 * hardware on Niagara.
799 */
800int vis_emul(struct pt_regs *regs, unsigned int insn)
801{
802 unsigned long pc = regs->tpc;
803 unsigned int opf;
804
805 BUG_ON(regs->tstate & TSTATE_PRIV);
806
807 if (test_thread_flag(TIF_32BIT))
808 pc = (u32)pc;
809
810 if (get_user(insn, (u32 __user *) pc))
811 return -EFAULT;
812
813 if ((insn & VIS_OPCODE_MASK) != VIS_OPCODE_VAL)
814 return -EINVAL;
815
816 opf = (insn & VIS_OPF_MASK) >> VIS_OPF_SHIFT;
817 switch (opf) {
818 default:
819 return -EINVAL;
820
821 /* Pixel Formatting Instructions. */
822 case FPACK16_OPF:
823 case FPACK32_OPF:
824 case FPACKFIX_OPF:
825 case FEXPAND_OPF:
826 case FPMERGE_OPF:
827 pformat(regs, insn, opf);
828 break;
829
830 /* Partitioned Multiply Instructions */
831 case FMUL8x16_OPF:
832 case FMUL8x16AU_OPF:
833 case FMUL8x16AL_OPF:
834 case FMUL8SUx16_OPF:
835 case FMUL8ULx16_OPF:
836 case FMULD8SUx16_OPF:
837 case FMULD8ULx16_OPF:
838 pmul(regs, insn, opf);
839 break;
840
841 /* Pixel Compare Instructions */
842 case FCMPGT16_OPF:
843 case FCMPGT32_OPF:
844 case FCMPLE16_OPF:
845 case FCMPLE32_OPF:
846 case FCMPNE16_OPF:
847 case FCMPNE32_OPF:
848 case FCMPEQ16_OPF:
849 case FCMPEQ32_OPF:
850 pcmp(regs, insn, opf);
851 break;
852
853 /* Edge Handling Instructions */
854 case EDGE8_OPF:
855 case EDGE8N_OPF:
856 case EDGE8L_OPF:
857 case EDGE8LN_OPF:
858 case EDGE16_OPF:
859 case EDGE16N_OPF:
860 case EDGE16L_OPF:
861 case EDGE16LN_OPF:
862 case EDGE32_OPF:
863 case EDGE32N_OPF:
864 case EDGE32L_OPF:
865 case EDGE32LN_OPF:
866 edge(regs, insn, opf);
867 break;
868
869 /* Pixel Component Distance */
870 case PDIST_OPF:
871 pdist(regs, insn);
872 break;
873
874 /* Three-Dimensional Array Addressing Instructions */
875 case ARRAY8_OPF:
876 case ARRAY16_OPF:
877 case ARRAY32_OPF:
878 array(regs, insn, opf);
879 break;
880
881 /* Byte Mask and Shuffle Instructions */
882 case BMASK_OPF:
883 bmask(regs, insn);
884 break;
885
886 case BSHUFFLE_OPF:
887 bshuffle(regs, insn);
888 break;
889 };
890
891 regs->tpc = regs->tnpc;
892 regs->tnpc += 4;
893 return 0;
894}
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
index 467d13a0d5c1..b097379a49a8 100644
--- a/arch/sparc64/kernel/vmlinux.lds.S
+++ b/arch/sparc64/kernel/vmlinux.lds.S
@@ -70,6 +70,22 @@ SECTIONS
70 .con_initcall.init : { *(.con_initcall.init) } 70 .con_initcall.init : { *(.con_initcall.init) }
71 __con_initcall_end = .; 71 __con_initcall_end = .;
72 SECURITY_INIT 72 SECURITY_INIT
73 . = ALIGN(4);
74 __tsb_ldquad_phys_patch = .;
75 .tsb_ldquad_phys_patch : { *(.tsb_ldquad_phys_patch) }
76 __tsb_ldquad_phys_patch_end = .;
77 __tsb_phys_patch = .;
78 .tsb_phys_patch : { *(.tsb_phys_patch) }
79 __tsb_phys_patch_end = .;
80 __cpuid_patch = .;
81 .cpuid_patch : { *(.cpuid_patch) }
82 __cpuid_patch_end = .;
83 __sun4v_1insn_patch = .;
84 .sun4v_1insn_patch : { *(.sun4v_1insn_patch) }
85 __sun4v_1insn_patch_end = .;
86 __sun4v_2insn_patch = .;
87 .sun4v_2insn_patch : { *(.sun4v_2insn_patch) }
88 __sun4v_2insn_patch_end = .;
73 . = ALIGN(8192); 89 . = ALIGN(8192);
74 __initramfs_start = .; 90 __initramfs_start = .;
75 .init.ramfs : { *(.init.ramfs) } 91 .init.ramfs : { *(.init.ramfs) }
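
The .sun4v_2insn_patch section gathered here is filled by the `.word 661b` stanzas seen in tsb.S and elsewhere: each entry is the address of a 661: label followed by the two replacement instruction words. A hedged sketch of how boot code might walk it; the struct and function names are guesses for illustration, and a real walker would also flush the patched words from the instruction cache:

	struct sun4v_2insn_patch_sketch {
		unsigned int addr;	/* the 661: label to patch */
		unsigned int insns[2];	/* two replacement instructions */
	};

	extern struct sun4v_2insn_patch_sketch __sun4v_2insn_patch,
					       __sun4v_2insn_patch_end;

	static void apply_sun4v_2insn_patches(void)
	{
		struct sun4v_2insn_patch_sketch *p = &__sun4v_2insn_patch;

		while (p < &__sun4v_2insn_patch_end) {
			unsigned long addr = p->addr;

			*(unsigned int *) (addr + 0x0) = p->insns[0];
			*(unsigned int *) (addr + 0x4) = p->insns[1];
			/* flush the I-cache for both words here */
			p++;
		}
	}
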
diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S
index 39160926267b..c4aa110a10e5 100644
--- a/arch/sparc64/kernel/winfixup.S
+++ b/arch/sparc64/kernel/winfixup.S
@@ -1,8 +1,6 @@
1/* $Id: winfixup.S,v 1.30 2002/02/09 19:49:30 davem Exp $ 1/* winfixup.S: Handle cases where user stack pointer is found to be bogus.
2 * 2 *
3 * winfixup.S: Handle cases where user stack pointer is found to be bogus. 3 * Copyright (C) 1997, 2006 David S. Miller (davem@davemloft.net)
4 *
5 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
6 */ 4 */
7 5
8#include <asm/asi.h> 6#include <asm/asi.h>
@@ -15,374 +13,144 @@
15 13
16 .text 14 .text
17 15
18set_pcontext: 16 /* It used to be the case that these register window fault
19 sethi %hi(sparc64_kern_pri_context), %l1 17 * handlers could run via the save and restore instructions
20 ldx [%l1 + %lo(sparc64_kern_pri_context)], %l1 18 * done by the trap entry and exit code. They now do the
21 mov PRIMARY_CONTEXT, %g1 19 * window spill/fill by hand, so that case no longer can occur.
22 stxa %l1, [%g1] ASI_DMMU 20 */
23 flush %g6
24 retl
25 nop
26 21
27 .align 32 22 .align 32
28
29 /* Here are the rules, pay attention.
30 *
31 * The kernel is disallowed from touching user space while
32 * the trap level is greater than zero, except for from within
33 * the window spill/fill handlers. This must be followed
34 * so that we can easily detect the case where we tried to
35 * spill/fill with a bogus (or unmapped) user stack pointer.
36 *
37 * These are layed out in a special way for cache reasons,
38 * don't touch...
39 */
40 .globl fill_fixup, spill_fixup
41fill_fixup: 23fill_fixup:
42 rdpr %tstate, %g1 24 TRAP_LOAD_THREAD_REG(%g6, %g1)
43 andcc %g1, TSTATE_PRIV, %g0 25 rdpr %tstate, %g1
44 or %g4, FAULT_CODE_WINFIXUP, %g4 26 and %g1, TSTATE_CWP, %g1
45 be,pt %xcc, window_scheisse_from_user_common 27 or %g4, FAULT_CODE_WINFIXUP, %g4
46 and %g1, TSTATE_CWP, %g1 28 stb %g4, [%g6 + TI_FAULT_CODE]
47 29 stx %g5, [%g6 + TI_FAULT_ADDR]
48 /* This is the extremely complex case, but it does happen from 30 wrpr %g1, %cwp
49 * time to time if things are just right. Essentially the restore 31 ba,pt %xcc, etrap
50 * done in rtrap right before going back to user mode, with tl=1 32 rd %pc, %g7
51 * and that levels trap stack registers all setup, took a fill trap, 33 call do_sparc64_fault
52 * the user stack was not mapped in the tlb, and tlb miss occurred, 34 add %sp, PTREGS_OFF, %o0
53 * the pte found was not valid, and a simple ref bit watch update 35 ba,pt %xcc, rtrap_clr_l6
54 * could not satisfy the miss, so we got here.
55 *
56 * We must carefully unwind the state so we get back to tl=0, preserve
57 * all the register values we were going to give to the user. Luckily
58 * most things are where they need to be, we also have the address
59 * which triggered the fault handy as well.
60 *
61 * Also note that we must preserve %l5 and %l6. If the user was
62 * returning from a system call, we must make it look this way
63 * after we process the fill fault on the users stack.
64 *
65 * First, get into the window where the original restore was executed.
66 */
67
68 rdpr %wstate, %g2 ! Grab user mode wstate.
69 wrpr %g1, %cwp ! Get into the right window.
70 sll %g2, 3, %g2 ! NORMAL-->OTHER
71
72 wrpr %g0, 0x0, %canrestore ! Standard etrap stuff.
73 wrpr %g2, 0x0, %wstate ! This must be consistent.
74 wrpr %g0, 0x0, %otherwin ! We know this.
75 call set_pcontext ! Change contexts...
76 nop 36 nop
77 rdpr %pstate, %l1 ! Prepare to change globals.
78 mov %g6, %o7 ! Get current.
79
80 andn %l1, PSTATE_MM, %l1 ! We want to be in RMO
81 stb %g4, [%g6 + TI_FAULT_CODE]
82 stx %g5, [%g6 + TI_FAULT_ADDR]
83 wrpr %g0, 0x0, %tl ! Out of trap levels.
84 wrpr %l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
85 mov %o7, %g6
86 ldx [%g6 + TI_TASK], %g4
87#ifdef CONFIG_SMP
88 mov TSB_REG, %g1
89 ldxa [%g1] ASI_IMMU, %g5
90#endif
91 37
92 /* This is the same as below, except we handle this a bit special 38 /* Be very careful about usage of the trap globals here.
93 * since we must preserve %l5 and %l6, see comment above. 39 * You cannot touch %g5 as that has the fault information.
94 */
95 call do_sparc64_fault
96 add %sp, PTREGS_OFF, %o0
97 ba,pt %xcc, rtrap
98 nop ! yes, nop is correct
99
100 /* Be very careful about usage of the alternate globals here.
101 * You cannot touch %g4/%g5 as that has the fault information
102 * should this be from usermode. Also be careful for the case
103 * where we get here from the save instruction in etrap.S when
104 * coming from either user or kernel (does not matter which, it
105 * is the same problem in both cases). Essentially this means
106 * do not touch %g7 or %g2 so we handle the two cases fine.
107 */ 40 */
108spill_fixup: 41spill_fixup:
109 ldx [%g6 + TI_FLAGS], %g1 42spill_fixup_mna:
110 andcc %g1, _TIF_32BIT, %g0 43spill_fixup_dax:
111 ldub [%g6 + TI_WSAVED], %g1 44 TRAP_LOAD_THREAD_REG(%g6, %g1)
112 45 ldx [%g6 + TI_FLAGS], %g1
113 sll %g1, 3, %g3 46 andcc %g1, _TIF_32BIT, %g0
114 add %g6, %g3, %g3 47 ldub [%g6 + TI_WSAVED], %g1
115 stx %sp, [%g3 + TI_RWIN_SPTRS] 48 sll %g1, 3, %g3
116 sll %g1, 7, %g3 49 add %g6, %g3, %g3
117 bne,pt %xcc, 1f 50 stx %sp, [%g3 + TI_RWIN_SPTRS]
118 add %g6, %g3, %g3 51 sll %g1, 7, %g3
119 stx %l0, [%g3 + TI_REG_WINDOW + 0x00] 52 bne,pt %xcc, 1f
120 stx %l1, [%g3 + TI_REG_WINDOW + 0x08] 53 add %g6, %g3, %g3
121 54 stx %l0, [%g3 + TI_REG_WINDOW + 0x00]
122 stx %l2, [%g3 + TI_REG_WINDOW + 0x10] 55 stx %l1, [%g3 + TI_REG_WINDOW + 0x08]
123 stx %l3, [%g3 + TI_REG_WINDOW + 0x18] 56 stx %l2, [%g3 + TI_REG_WINDOW + 0x10]
124 stx %l4, [%g3 + TI_REG_WINDOW + 0x20] 57 stx %l3, [%g3 + TI_REG_WINDOW + 0x18]
125 stx %l5, [%g3 + TI_REG_WINDOW + 0x28] 58 stx %l4, [%g3 + TI_REG_WINDOW + 0x20]
126 stx %l6, [%g3 + TI_REG_WINDOW + 0x30] 59 stx %l5, [%g3 + TI_REG_WINDOW + 0x28]
127 stx %l7, [%g3 + TI_REG_WINDOW + 0x38] 60 stx %l6, [%g3 + TI_REG_WINDOW + 0x30]
128 stx %i0, [%g3 + TI_REG_WINDOW + 0x40] 61 stx %l7, [%g3 + TI_REG_WINDOW + 0x38]
129 stx %i1, [%g3 + TI_REG_WINDOW + 0x48] 62 stx %i0, [%g3 + TI_REG_WINDOW + 0x40]
130 63 stx %i1, [%g3 + TI_REG_WINDOW + 0x48]
131 stx %i2, [%g3 + TI_REG_WINDOW + 0x50] 64 stx %i2, [%g3 + TI_REG_WINDOW + 0x50]
132 stx %i3, [%g3 + TI_REG_WINDOW + 0x58] 65 stx %i3, [%g3 + TI_REG_WINDOW + 0x58]
133 stx %i4, [%g3 + TI_REG_WINDOW + 0x60] 66 stx %i4, [%g3 + TI_REG_WINDOW + 0x60]
134 stx %i5, [%g3 + TI_REG_WINDOW + 0x68] 67 stx %i5, [%g3 + TI_REG_WINDOW + 0x68]
135 stx %i6, [%g3 + TI_REG_WINDOW + 0x70] 68 stx %i6, [%g3 + TI_REG_WINDOW + 0x70]
136 b,pt %xcc, 2f 69 ba,pt %xcc, 2f
137 stx %i7, [%g3 + TI_REG_WINDOW + 0x78] 70 stx %i7, [%g3 + TI_REG_WINDOW + 0x78]
1381: stw %l0, [%g3 + TI_REG_WINDOW + 0x00] 711: stw %l0, [%g3 + TI_REG_WINDOW + 0x00]
139 72 stw %l1, [%g3 + TI_REG_WINDOW + 0x04]
140 stw %l1, [%g3 + TI_REG_WINDOW + 0x04] 73 stw %l2, [%g3 + TI_REG_WINDOW + 0x08]
141 stw %l2, [%g3 + TI_REG_WINDOW + 0x08] 74 stw %l3, [%g3 + TI_REG_WINDOW + 0x0c]
142 stw %l3, [%g3 + TI_REG_WINDOW + 0x0c] 75 stw %l4, [%g3 + TI_REG_WINDOW + 0x10]
143 stw %l4, [%g3 + TI_REG_WINDOW + 0x10] 76 stw %l5, [%g3 + TI_REG_WINDOW + 0x14]
144 stw %l5, [%g3 + TI_REG_WINDOW + 0x14] 77 stw %l6, [%g3 + TI_REG_WINDOW + 0x18]
145 stw %l6, [%g3 + TI_REG_WINDOW + 0x18] 78 stw %l7, [%g3 + TI_REG_WINDOW + 0x1c]
146 stw %l7, [%g3 + TI_REG_WINDOW + 0x1c] 79 stw %i0, [%g3 + TI_REG_WINDOW + 0x20]
147 stw %i0, [%g3 + TI_REG_WINDOW + 0x20] 80 stw %i1, [%g3 + TI_REG_WINDOW + 0x24]
148 81 stw %i2, [%g3 + TI_REG_WINDOW + 0x28]
149 stw %i1, [%g3 + TI_REG_WINDOW + 0x24] 82 stw %i3, [%g3 + TI_REG_WINDOW + 0x2c]
150 stw %i2, [%g3 + TI_REG_WINDOW + 0x28] 83 stw %i4, [%g3 + TI_REG_WINDOW + 0x30]
151 stw %i3, [%g3 + TI_REG_WINDOW + 0x2c] 84 stw %i5, [%g3 + TI_REG_WINDOW + 0x34]
152 stw %i4, [%g3 + TI_REG_WINDOW + 0x30] 85 stw %i6, [%g3 + TI_REG_WINDOW + 0x38]
153 stw %i5, [%g3 + TI_REG_WINDOW + 0x34] 86 stw %i7, [%g3 + TI_REG_WINDOW + 0x3c]
154 stw %i6, [%g3 + TI_REG_WINDOW + 0x38] 872: add %g1, 1, %g1
155 stw %i7, [%g3 + TI_REG_WINDOW + 0x3c] 88 stb %g1, [%g6 + TI_WSAVED]
1562: add %g1, 1, %g1 89 rdpr %tstate, %g1
157 90 andcc %g1, TSTATE_PRIV, %g0
158 stb %g1, [%g6 + TI_WSAVED]
159 rdpr %tstate, %g1
160 andcc %g1, TSTATE_PRIV, %g0
161 saved 91 saved
162 and %g1, TSTATE_CWP, %g1 92 be,pn %xcc, 1f
163 be,pn %xcc, window_scheisse_from_user_common 93 and %g1, TSTATE_CWP, %g1
164 mov FAULT_CODE_WRITE | FAULT_CODE_DTLB | FAULT_CODE_WINFIXUP, %g4
165 retry 94 retry
951: mov FAULT_CODE_WRITE | FAULT_CODE_DTLB | FAULT_CODE_WINFIXUP, %g4
96 stb %g4, [%g6 + TI_FAULT_CODE]
97 stx %g5, [%g6 + TI_FAULT_ADDR]
98 wrpr %g1, %cwp
99 ba,pt %xcc, etrap
100 rd %pc, %g7
101 call do_sparc64_fault
102 add %sp, PTREGS_OFF, %o0
103 ba,a,pt %xcc, rtrap_clr_l6
166 104
167window_scheisse_from_user_common:
168 stb %g4, [%g6 + TI_FAULT_CODE]
169 stx %g5, [%g6 + TI_FAULT_ADDR]
170 wrpr %g1, %cwp
171 ba,pt %xcc, etrap
172 rd %pc, %g7
173 call do_sparc64_fault
174 add %sp, PTREGS_OFF, %o0
175 ba,a,pt %xcc, rtrap_clr_l6
176
177 .globl winfix_mna, fill_fixup_mna, spill_fixup_mna
178winfix_mna: 105winfix_mna:
179 andn %g3, 0x7f, %g3 106 andn %g3, 0x7f, %g3
180 add %g3, 0x78, %g3 107 add %g3, 0x78, %g3
181 wrpr %g3, %tnpc 108 wrpr %g3, %tnpc
182 done 109 done
183fill_fixup_mna:
184 rdpr %tstate, %g1
185 andcc %g1, TSTATE_PRIV, %g0
186 be,pt %xcc, window_mna_from_user_common
187 and %g1, TSTATE_CWP, %g1
188 110
189 /* Please, see fill_fixup commentary about why we must preserve 111fill_fixup_mna:
190 * %l5 and %l6 to preserve absolute correct semantics. 112 rdpr %tstate, %g1
191 */ 113 and %g1, TSTATE_CWP, %g1
192 rdpr %wstate, %g2 ! Grab user mode wstate. 114 wrpr %g1, %cwp
193 wrpr %g1, %cwp ! Get into the right window. 115 ba,pt %xcc, etrap
194 sll %g2, 3, %g2 ! NORMAL-->OTHER 116 rd %pc, %g7
195 wrpr %g0, 0x0, %canrestore ! Standard etrap stuff. 117 sethi %hi(tlb_type), %g1
196 118 lduw [%g1 + %lo(tlb_type)], %g1
197 wrpr %g2, 0x0, %wstate ! This must be consistent. 119 cmp %g1, 3
198 wrpr %g0, 0x0, %otherwin ! We know this. 120 bne,pt %icc, 1f
199 call set_pcontext ! Change contexts... 121 add %sp, PTREGS_OFF, %o0
122 mov %l4, %o2
123 call sun4v_do_mna
124 mov %l5, %o1
125 ba,a,pt %xcc, rtrap_clr_l6
1261: mov %l4, %o1
127 mov %l5, %o2
128 call mem_address_unaligned
200 nop 129 nop
201 rdpr %pstate, %l1 ! Prepare to change globals. 130 ba,a,pt %xcc, rtrap_clr_l6
202 mov %g4, %o2 ! Setup args for
203 mov %g5, %o1 ! final call to mem_address_unaligned.
204 andn %l1, PSTATE_MM, %l1 ! We want to be in RMO
205 131
206 mov %g6, %o7 ! Stash away current.
207 wrpr %g0, 0x0, %tl ! Out of trap levels.
208 wrpr %l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
209 mov %o7, %g6 ! Get current back.
210 ldx [%g6 + TI_TASK], %g4 ! Finish it.
211#ifdef CONFIG_SMP
212 mov TSB_REG, %g1
213 ldxa [%g1] ASI_IMMU, %g5
214#endif
215 call mem_address_unaligned
216 add %sp, PTREGS_OFF, %o0
217
218 b,pt %xcc, rtrap
219 nop ! yes, the nop is correct
220spill_fixup_mna:
221 ldx [%g6 + TI_FLAGS], %g1
222 andcc %g1, _TIF_32BIT, %g0
223 ldub [%g6 + TI_WSAVED], %g1
224 sll %g1, 3, %g3
225 add %g6, %g3, %g3
226 stx %sp, [%g3 + TI_RWIN_SPTRS]
227
228 sll %g1, 7, %g3
229 bne,pt %xcc, 1f
230 add %g6, %g3, %g3
231 stx %l0, [%g3 + TI_REG_WINDOW + 0x00]
232 stx %l1, [%g3 + TI_REG_WINDOW + 0x08]
233 stx %l2, [%g3 + TI_REG_WINDOW + 0x10]
234 stx %l3, [%g3 + TI_REG_WINDOW + 0x18]
235 stx %l4, [%g3 + TI_REG_WINDOW + 0x20]
236
237 stx %l5, [%g3 + TI_REG_WINDOW + 0x28]
238 stx %l6, [%g3 + TI_REG_WINDOW + 0x30]
239 stx %l7, [%g3 + TI_REG_WINDOW + 0x38]
240 stx %i0, [%g3 + TI_REG_WINDOW + 0x40]
241 stx %i1, [%g3 + TI_REG_WINDOW + 0x48]
242 stx %i2, [%g3 + TI_REG_WINDOW + 0x50]
243 stx %i3, [%g3 + TI_REG_WINDOW + 0x58]
244 stx %i4, [%g3 + TI_REG_WINDOW + 0x60]
245
246 stx %i5, [%g3 + TI_REG_WINDOW + 0x68]
247 stx %i6, [%g3 + TI_REG_WINDOW + 0x70]
248 stx %i7, [%g3 + TI_REG_WINDOW + 0x78]
249 b,pt %xcc, 2f
250 add %g1, 1, %g1
2511: std %l0, [%g3 + TI_REG_WINDOW + 0x00]
252 std %l2, [%g3 + TI_REG_WINDOW + 0x08]
253 std %l4, [%g3 + TI_REG_WINDOW + 0x10]
254
255 std %l6, [%g3 + TI_REG_WINDOW + 0x18]
256 std %i0, [%g3 + TI_REG_WINDOW + 0x20]
257 std %i2, [%g3 + TI_REG_WINDOW + 0x28]
258 std %i4, [%g3 + TI_REG_WINDOW + 0x30]
259 std %i6, [%g3 + TI_REG_WINDOW + 0x38]
260 add %g1, 1, %g1
2612: stb %g1, [%g6 + TI_WSAVED]
262 rdpr %tstate, %g1
263
264 andcc %g1, TSTATE_PRIV, %g0
265 saved
266 be,pn %xcc, window_mna_from_user_common
267 and %g1, TSTATE_CWP, %g1
268 retry
269window_mna_from_user_common:
270 wrpr %g1, %cwp
271 sethi %hi(109f), %g7
272 ba,pt %xcc, etrap
273109: or %g7, %lo(109b), %g7
274 mov %l4, %o2
275 mov %l5, %o1
276 call mem_address_unaligned
277 add %sp, PTREGS_OFF, %o0
278 ba,pt %xcc, rtrap
279 clr %l6
280
281 /* These are only needed for 64-bit mode processes which
282 * put their stack pointer into the VPTE area and there
283 * happens to be a VPTE tlb entry mapped there during
284 * a spill/fill trap to that stack frame.
285 */
286 .globl winfix_dax, fill_fixup_dax, spill_fixup_dax
287winfix_dax: 132winfix_dax:
288 andn %g3, 0x7f, %g3 133 andn %g3, 0x7f, %g3
289 add %g3, 0x74, %g3 134 add %g3, 0x74, %g3
290 wrpr %g3, %tnpc 135 wrpr %g3, %tnpc
291 done 136 done
292fill_fixup_dax:
293 rdpr %tstate, %g1
294 andcc %g1, TSTATE_PRIV, %g0
295 be,pt %xcc, window_dax_from_user_common
296 and %g1, TSTATE_CWP, %g1
297
298 /* Please, see fill_fixup commentary about why we must preserve
299 * %l5 and %l6 to preserve absolute correct semantics.
300 */
301 rdpr %wstate, %g2 ! Grab user mode wstate.
302 wrpr %g1, %cwp ! Get into the right window.
303 sll %g2, 3, %g2 ! NORMAL-->OTHER
304 wrpr %g0, 0x0, %canrestore ! Standard etrap stuff.
305 137
306 wrpr %g2, 0x0, %wstate ! This must be consistent. 138fill_fixup_dax:
307 wrpr %g0, 0x0, %otherwin ! We know this. 139 rdpr %tstate, %g1
308 call set_pcontext ! Change contexts... 140 and %g1, TSTATE_CWP, %g1
141 wrpr %g1, %cwp
142 ba,pt %xcc, etrap
143 rd %pc, %g7
144 sethi %hi(tlb_type), %g1
145 mov %l4, %o1
146 lduw [%g1 + %lo(tlb_type)], %g1
147 mov %l5, %o2
148 cmp %g1, 3
149 bne,pt %icc, 1f
150 add %sp, PTREGS_OFF, %o0
151 call sun4v_data_access_exception
309 nop 152 nop
310 rdpr %pstate, %l1 ! Prepare to change globals. 153 ba,a,pt %xcc, rtrap_clr_l6
311 mov %g4, %o1 ! Setup args for 1541: call spitfire_data_access_exception
312 mov %g5, %o2 ! final call to spitfire_data_access_exception. 155 nop
313 andn %l1, PSTATE_MM, %l1 ! We want to be in RMO 156 ba,a,pt %xcc, rtrap_clr_l6
314
315 mov %g6, %o7 ! Stash away current.
316 wrpr %g0, 0x0, %tl ! Out of trap levels.
317 wrpr %l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
318 mov %o7, %g6 ! Get current back.
319 ldx [%g6 + TI_TASK], %g4 ! Finish it.
320#ifdef CONFIG_SMP
321 mov TSB_REG, %g1
322 ldxa [%g1] ASI_IMMU, %g5
323#endif
324 call spitfire_data_access_exception
325 add %sp, PTREGS_OFF, %o0
326
327 b,pt %xcc, rtrap
328 nop ! yes, the nop is correct
329spill_fixup_dax:
330 ldx [%g6 + TI_FLAGS], %g1
331 andcc %g1, _TIF_32BIT, %g0
332 ldub [%g6 + TI_WSAVED], %g1
333 sll %g1, 3, %g3
334 add %g6, %g3, %g3
335 stx %sp, [%g3 + TI_RWIN_SPTRS]
336
337 sll %g1, 7, %g3
338 bne,pt %xcc, 1f
339 add %g6, %g3, %g3
340 stx %l0, [%g3 + TI_REG_WINDOW + 0x00]
341 stx %l1, [%g3 + TI_REG_WINDOW + 0x08]
342 stx %l2, [%g3 + TI_REG_WINDOW + 0x10]
343 stx %l3, [%g3 + TI_REG_WINDOW + 0x18]
344 stx %l4, [%g3 + TI_REG_WINDOW + 0x20]
345
346 stx %l5, [%g3 + TI_REG_WINDOW + 0x28]
347 stx %l6, [%g3 + TI_REG_WINDOW + 0x30]
348 stx %l7, [%g3 + TI_REG_WINDOW + 0x38]
349 stx %i0, [%g3 + TI_REG_WINDOW + 0x40]
350 stx %i1, [%g3 + TI_REG_WINDOW + 0x48]
351 stx %i2, [%g3 + TI_REG_WINDOW + 0x50]
352 stx %i3, [%g3 + TI_REG_WINDOW + 0x58]
353 stx %i4, [%g3 + TI_REG_WINDOW + 0x60]
354
355 stx %i5, [%g3 + TI_REG_WINDOW + 0x68]
356 stx %i6, [%g3 + TI_REG_WINDOW + 0x70]
357 stx %i7, [%g3 + TI_REG_WINDOW + 0x78]
358 b,pt %xcc, 2f
359 add %g1, 1, %g1
3601: std %l0, [%g3 + TI_REG_WINDOW + 0x00]
361 std %l2, [%g3 + TI_REG_WINDOW + 0x08]
362 std %l4, [%g3 + TI_REG_WINDOW + 0x10]
363
364 std %l6, [%g3 + TI_REG_WINDOW + 0x18]
365 std %i0, [%g3 + TI_REG_WINDOW + 0x20]
366 std %i2, [%g3 + TI_REG_WINDOW + 0x28]
367 std %i4, [%g3 + TI_REG_WINDOW + 0x30]
368 std %i6, [%g3 + TI_REG_WINDOW + 0x38]
369 add %g1, 1, %g1
3702: stb %g1, [%g6 + TI_WSAVED]
371 rdpr %tstate, %g1
372
373 andcc %g1, TSTATE_PRIV, %g0
374 saved
375 be,pn %xcc, window_dax_from_user_common
376 and %g1, TSTATE_CWP, %g1
377 retry
378window_dax_from_user_common:
379 wrpr %g1, %cwp
380 sethi %hi(109f), %g7
381 ba,pt %xcc, etrap
382109: or %g7, %lo(109b), %g7
383 mov %l4, %o1
384 mov %l5, %o2
385 call spitfire_data_access_exception
386 add %sp, PTREGS_OFF, %o0
387 ba,pt %xcc, rtrap
388 clr %l6
diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile
index c295806500f7..8812ded19f01 100644
--- a/arch/sparc64/lib/Makefile
+++ b/arch/sparc64/lib/Makefile
@@ -11,6 +11,8 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
11 VISsave.o atomic.o bitops.o \ 11 VISsave.o atomic.o bitops.o \
12 U1memcpy.o U1copy_from_user.o U1copy_to_user.o \ 12 U1memcpy.o U1copy_from_user.o U1copy_to_user.o \
13 U3memcpy.o U3copy_from_user.o U3copy_to_user.o U3patch.o \ 13 U3memcpy.o U3copy_from_user.o U3copy_to_user.o U3patch.o \
14 NGmemcpy.o NGcopy_from_user.o NGcopy_to_user.o NGpatch.o \
15 NGpage.o NGbzero.o \
14 copy_in_user.o user_fixup.o memmove.o \ 16 copy_in_user.o user_fixup.o memmove.o \
15 mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o 17 mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o
16 18
diff --git a/arch/sparc64/lib/NGbzero.S b/arch/sparc64/lib/NGbzero.S
new file mode 100644
index 000000000000..e86baece5cc8
--- /dev/null
+++ b/arch/sparc64/lib/NGbzero.S
@@ -0,0 +1,163 @@
1/* NGbzero.S: Niagara optimized memset/clear_user.
2 *
3 * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
4 */
5#include <asm/asi.h>
6
7#define EX_ST(x,y) \
898: x,y; \
9 .section .fixup; \
10 .align 4; \
1199: retl; \
12 mov %o1, %o0; \
13 .section __ex_table; \
14 .align 4; \
15 .word 98b, 99b; \
16 .text; \
17 .align 4;
18
19 .text
20
21 .globl NGmemset
22 .type NGmemset, #function
23NGmemset: /* %o0=buf, %o1=pat, %o2=len */
24 and %o1, 0xff, %o3
25 mov %o2, %o1
26 sllx %o3, 8, %g1
27 or %g1, %o3, %o2
28 sllx %o2, 16, %g1
29 or %g1, %o2, %o2
30 sllx %o2, 32, %g1
31 ba,pt %xcc, 1f
32 or %g1, %o2, %o2
33
34 .globl NGbzero
35 .type NGbzero, #function
36NGbzero:
37 clr %o2
381: brz,pn %o1, NGbzero_return
39 mov %o0, %o3
40
41 /* %o5: saved %asi, restored at NGbzero_done
42 * %g7: store-init %asi to use
43 * %o4: non-store-init %asi to use
44 */
45 rd %asi, %o5
46 mov ASI_BLK_INIT_QUAD_LDD_P, %g7
47 mov ASI_P, %o4
48 wr %o4, 0x0, %asi
49
50NGbzero_from_clear_user:
51 cmp %o1, 15
52 bl,pn %icc, NGbzero_tiny
53 andcc %o0, 0x7, %g1
54 be,pt %xcc, 2f
55 mov 8, %g2
56 sub %g2, %g1, %g1
57 sub %o1, %g1, %o1
581: EX_ST(stba %o2, [%o0 + 0x00] %asi)
59 subcc %g1, 1, %g1
60 bne,pt %xcc, 1b
61 add %o0, 1, %o0
622: cmp %o1, 128
63 bl,pn %icc, NGbzero_medium
64 andcc %o0, (64 - 1), %g1
65 be,pt %xcc, NGbzero_pre_loop
66 mov 64, %g2
67 sub %g2, %g1, %g1
68 sub %o1, %g1, %o1
691: EX_ST(stxa %o2, [%o0 + 0x00] %asi)
70 subcc %g1, 8, %g1
71 bne,pt %xcc, 1b
72 add %o0, 8, %o0
73
74NGbzero_pre_loop:
75 wr %g7, 0x0, %asi
76 andn %o1, (64 - 1), %g1
77 sub %o1, %g1, %o1
78NGbzero_loop:
79 EX_ST(stxa %o2, [%o0 + 0x00] %asi)
80 EX_ST(stxa %o2, [%o0 + 0x08] %asi)
81 EX_ST(stxa %o2, [%o0 + 0x10] %asi)
82 EX_ST(stxa %o2, [%o0 + 0x18] %asi)
83 EX_ST(stxa %o2, [%o0 + 0x20] %asi)
84 EX_ST(stxa %o2, [%o0 + 0x28] %asi)
85 EX_ST(stxa %o2, [%o0 + 0x30] %asi)
86 EX_ST(stxa %o2, [%o0 + 0x38] %asi)
87 subcc %g1, 64, %g1
88 bne,pt %xcc, NGbzero_loop
89 add %o0, 64, %o0
90
91 wr %o4, 0x0, %asi
92 brz,pn %o1, NGbzero_done
93NGbzero_medium:
94 andncc %o1, 0x7, %g1
95 be,pn %xcc, 2f
96 sub %o1, %g1, %o1
971: EX_ST(stxa %o2, [%o0 + 0x00] %asi)
98 subcc %g1, 8, %g1
99 bne,pt %xcc, 1b
100 add %o0, 8, %o0
1012: brz,pt %o1, NGbzero_done
102 nop
103
104NGbzero_tiny:
1051: EX_ST(stba %o2, [%o0 + 0x00] %asi)
106 subcc %o1, 1, %o1
107 bne,pt %icc, 1b
108 add %o0, 1, %o0
109
110 /* fallthrough */
111
112NGbzero_done:
113 wr %o5, 0x0, %asi
114
115NGbzero_return:
116 retl
117 mov %o3, %o0
118 .size NGbzero, .-NGbzero
119 .size NGmemset, .-NGmemset
120
121 .globl NGclear_user
122 .type NGclear_user, #function
123NGclear_user: /* %o0=buf, %o1=len */
124 rd %asi, %o5
125 brz,pn %o1, NGbzero_done
126 clr %o3
127 cmp %o5, ASI_AIUS
128 bne,pn %icc, NGbzero
129 clr %o2
130 mov ASI_BLK_INIT_QUAD_LDD_AIUS, %g7
131 ba,pt %xcc, NGbzero_from_clear_user
132 mov ASI_AIUS, %o4
133 .size NGclear_user, .-NGclear_user
134
135#define BRANCH_ALWAYS 0x10680000
136#define NOP 0x01000000
137#define NG_DO_PATCH(OLD, NEW) \
138 sethi %hi(NEW), %g1; \
139 or %g1, %lo(NEW), %g1; \
140 sethi %hi(OLD), %g2; \
141 or %g2, %lo(OLD), %g2; \
142 sub %g1, %g2, %g1; \
143 sethi %hi(BRANCH_ALWAYS), %g3; \
144 sll %g1, 11, %g1; \
145 srl %g1, 11 + 2, %g1; \
146 or %g3, %lo(BRANCH_ALWAYS), %g3; \
147 or %g3, %g1, %g3; \
148 stw %g3, [%g2]; \
149 sethi %hi(NOP), %g3; \
150 or %g3, %lo(NOP), %g3; \
151 stw %g3, [%g2 + 0x4]; \
152 flush %g2;
153
154 .globl niagara_patch_bzero
155 .type niagara_patch_bzero,#function
156niagara_patch_bzero:
157 NG_DO_PATCH(memset, NGmemset)
158 NG_DO_PATCH(__bzero, NGbzero)
159 NG_DO_PATCH(__clear_user, NGclear_user)
160 NG_DO_PATCH(tsb_init, NGtsb_init)
161 retl
162 nop
163 .size niagara_patch_bzero,.-niagara_patch_bzero
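
The NG_DO_PATCH macro above overwrites the first two instructions of each generic routine with a branch to its Niagara replacement followed by a nop, then flushes the modified text. A minimal C sketch of the instruction words it constructs is given below; the helper name and use of <stdint.h> are illustrative, not part of the patch.

#include <stdint.h>

/* Illustrative sketch: the two words NG_DO_PATCH stores over the old
 * routine.  BRANCH_ALWAYS (0x10680000) is "ba,pt %xcc" with an empty
 * disp19 field; the displacement is the byte distance between the two
 * symbols expressed in 32-bit words and masked to 19 bits.
 */
static void ng_patch_words_sketch(uint32_t words[2],
				  unsigned long old_fn, unsigned long new_fn)
{
	uint32_t disp19 = ((uint32_t)(new_fn - old_fn) >> 2) & 0x7ffff;

	words[0] = 0x10680000 | disp19;	/* ba,pt %xcc, new_fn */
	words[1] = 0x01000000;		/* nop in the delay slot */
}
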
diff --git a/arch/sparc64/lib/NGcopy_from_user.S b/arch/sparc64/lib/NGcopy_from_user.S
new file mode 100644
index 000000000000..2d93456f76dd
--- /dev/null
+++ b/arch/sparc64/lib/NGcopy_from_user.S
@@ -0,0 +1,37 @@
1/* NGcopy_from_user.S: Niagara optimized copy from userspace.
2 *
3 * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
4 */
5
6#define EX_LD(x) \
798: x; \
8 .section .fixup; \
9 .align 4; \
1099: wr %g0, ASI_AIUS, %asi;\
11 retl; \
12 mov 1, %o0; \
13 .section __ex_table,"a";\
14 .align 4; \
15 .word 98b, 99b; \
16 .text; \
17 .align 4;
18
19#ifndef ASI_AIUS
20#define ASI_AIUS 0x11
21#endif
22
23#define FUNC_NAME NGcopy_from_user
24#define LOAD(type,addr,dest) type##a [addr] ASI_AIUS, dest
25#define LOAD_TWIN(addr_reg,dest0,dest1) \
26 ldda [addr_reg] ASI_BLK_INIT_QUAD_LDD_AIUS, dest0
27#define EX_RETVAL(x) 0
28
29#ifdef __KERNEL__
30#define PREAMBLE \
31 rd %asi, %g1; \
32 cmp %g1, ASI_AIUS; \
33 bne,pn %icc, memcpy_user_stub; \
34 nop
35#endif
36
37#include "NGmemcpy.S"
diff --git a/arch/sparc64/lib/NGcopy_to_user.S b/arch/sparc64/lib/NGcopy_to_user.S
new file mode 100644
index 000000000000..34112d5054ef
--- /dev/null
+++ b/arch/sparc64/lib/NGcopy_to_user.S
@@ -0,0 +1,40 @@
1/* NGcopy_to_user.S: Niagara optimized copy to userspace.
2 *
3 * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
4 */
5
6#define EX_ST(x) \
798: x; \
8 .section .fixup; \
9 .align 4; \
1099: wr %g0, ASI_AIUS, %asi;\
11 retl; \
12 mov 1, %o0; \
13 .section __ex_table,"a";\
14 .align 4; \
15 .word 98b, 99b; \
16 .text; \
17 .align 4;
18
19#ifndef ASI_AIUS
20#define ASI_AIUS 0x11
21#endif
22
23#define FUNC_NAME NGcopy_to_user
24#define STORE(type,src,addr) type##a src, [addr] ASI_AIUS
25#define STORE_ASI ASI_BLK_INIT_QUAD_LDD_AIUS
26#define EX_RETVAL(x) 0
27
28#ifdef __KERNEL__
29 /* Writing to %asi is _expensive_ so we hardcode it.
30 * Reading %asi to check for KERNEL_DS is comparatively
31 * cheap.
32 */
33#define PREAMBLE \
34 rd %asi, %g1; \
35 cmp %g1, ASI_AIUS; \
36 bne,pn %icc, memcpy_user_stub; \
37 nop
38#endif
39
40#include "NGmemcpy.S"
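
Both user-copy wrappers use the same fixup scheme: every potentially faulting access is labelled, and the macro emits a __ex_table pair that sends a faulting PC to recovery code which restores %asi and returns non-zero, while PREAMBLE diverts to memcpy_user_stub when the current %asi is not ASI_AIUS. As a rough picture, each ".word 98b, 99b" pair corresponds to an entry shaped like the hypothetical struct below.

/* Illustrative sketch of the table entries the EX_LD/EX_ST macros emit.
 * The fault handler searches these when a trap hits kernel text and, on
 * a match, resumes execution at the fixup address instead of oopsing.
 */
struct ex_entry_sketch {
	unsigned int insn;	/* address of the guarded ldxa/stxa (98:) */
	unsigned int fixup;	/* address of the recovery stub     (99:) */
};
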
diff --git a/arch/sparc64/lib/NGmemcpy.S b/arch/sparc64/lib/NGmemcpy.S
new file mode 100644
index 000000000000..8e522b3dc095
--- /dev/null
+++ b/arch/sparc64/lib/NGmemcpy.S
@@ -0,0 +1,368 @@
1/* NGmemcpy.S: Niagara optimized memcpy.
2 *
3 * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
4 */
5
6#ifdef __KERNEL__
7#include <asm/asi.h>
8#include <asm/thread_info.h>
9#define GLOBAL_SPARE %g7
10#define RESTORE_ASI(TMP) \
11 ldub [%g6 + TI_CURRENT_DS], TMP; \
12 wr TMP, 0x0, %asi;
13#else
14#define GLOBAL_SPARE %g5
15#define RESTORE_ASI(TMP) \
16 wr %g0, ASI_PNF, %asi
17#endif
18
19#ifndef STORE_ASI
20#define STORE_ASI ASI_BLK_INIT_QUAD_LDD_P
21#endif
22
23#ifndef EX_LD
24#define EX_LD(x) x
25#endif
26
27#ifndef EX_ST
28#define EX_ST(x) x
29#endif
30
31#ifndef EX_RETVAL
32#define EX_RETVAL(x) x
33#endif
34
35#ifndef LOAD
36#ifndef MEMCPY_DEBUG
37#define LOAD(type,addr,dest) type [addr], dest
38#else
39#define LOAD(type,addr,dest) type##a [addr] 0x80, dest
40#endif
41#endif
42
43#ifndef LOAD_TWIN
44#define LOAD_TWIN(addr_reg,dest0,dest1) \
45 ldda [addr_reg] ASI_BLK_INIT_QUAD_LDD_P, dest0
46#endif
47
48#ifndef STORE
49#define STORE(type,src,addr) type src, [addr]
50#endif
51
52#ifndef STORE_INIT
53#define STORE_INIT(src,addr) stxa src, [addr] %asi
54#endif
55
56#ifndef FUNC_NAME
57#define FUNC_NAME NGmemcpy
58#endif
59
60#ifndef PREAMBLE
61#define PREAMBLE
62#endif
63
64#ifndef XCC
65#define XCC xcc
66#endif
67
68 .register %g2,#scratch
69 .register %g3,#scratch
70
71 .text
72 .align 64
73
74 .globl FUNC_NAME
75 .type FUNC_NAME,#function
76FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
77 srlx %o2, 31, %g2
78 cmp %g2, 0
79 tne %xcc, 5
80 PREAMBLE
81 mov %o0, GLOBAL_SPARE
82 cmp %o2, 0
83 be,pn %XCC, 85f
84 or %o0, %o1, %o3
85 cmp %o2, 16
86 blu,a,pn %XCC, 80f
87 or %o3, %o2, %o3
88
89 /* 2 blocks (128 bytes) is the minimum we can do the block
90 * copy with. We need to ensure that we'll iterate at least
91 * once in the block copy loop. At worst we'll need to align
92 * the destination to a 64-byte boundary which can chew up
93 * to (64 - 1) bytes from the length before we perform the
94 * block copy loop.
95 */
96 cmp %o2, (2 * 64)
97 blu,pt %XCC, 70f
98 andcc %o3, 0x7, %g0
99
100 /* %o0: dst
101 * %o1: src
102 * %o2: len (known to be >= 128)
103 *
104 * The block copy loops will use %o4/%o5,%g2/%g3 as
105 * temporaries while copying the data.
106 */
107
108 LOAD(prefetch, %o1, #one_read)
109 wr %g0, STORE_ASI, %asi
110
111 /* Align destination on 64-byte boundary. */
112 andcc %o0, (64 - 1), %o4
113 be,pt %XCC, 2f
114 sub %o4, 64, %o4
115 sub %g0, %o4, %o4 ! bytes to align dst
116 sub %o2, %o4, %o2
1171: subcc %o4, 1, %o4
118 EX_LD(LOAD(ldub, %o1, %g1))
119 EX_ST(STORE(stb, %g1, %o0))
120 add %o1, 1, %o1
121 bne,pt %XCC, 1b
122 add %o0, 1, %o0
123
124 /* If the source is on a 16-byte boundary we can do
125 * the direct block copy loop. If it is 8-byte aligned
126 * we can do the 16-byte loads offset by -8 bytes and the
127 * init stores offset by one register.
128 *
129 * If the source is not even 8-byte aligned, we need to do
130 * shifting and masking (basically integer faligndata).
131 *
132 * The careful bit with init stores is that if we store
133 * to any part of the cache line we have to store the whole
134 * cacheline else we can end up with corrupt L2 cache line
135 * contents. Since the loop works on 64-bytes of 64-byte
136 * aligned store data at a time, this is easy to ensure.
137 */
1382:
139 andcc %o1, (16 - 1), %o4
140 andn %o2, (64 - 1), %g1 ! block copy loop iterator
141 sub %o2, %g1, %o2 ! final sub-block copy bytes
142 be,pt %XCC, 50f
143 cmp %o4, 8
144 be,a,pt %XCC, 10f
145 sub %o1, 0x8, %o1
146
147 /* Neither 8-byte nor 16-byte aligned, shift and mask. */
148 mov %g1, %o4
149 and %o1, 0x7, %g1
150 sll %g1, 3, %g1
151 mov 64, %o3
152 andn %o1, 0x7, %o1
153 EX_LD(LOAD(ldx, %o1, %g2))
154 sub %o3, %g1, %o3
155 sllx %g2, %g1, %g2
156
157#define SWIVEL_ONE_DWORD(SRC, TMP1, TMP2, PRE_VAL, PRE_SHIFT, POST_SHIFT, DST)\
158 EX_LD(LOAD(ldx, SRC, TMP1)); \
159 srlx TMP1, PRE_SHIFT, TMP2; \
160 or TMP2, PRE_VAL, TMP2; \
161 EX_ST(STORE_INIT(TMP2, DST)); \
162 sllx TMP1, POST_SHIFT, PRE_VAL;
163
1641: add %o1, 0x8, %o1
165 SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x00)
166 add %o1, 0x8, %o1
167 SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x08)
168 add %o1, 0x8, %o1
169 SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x10)
170 add %o1, 0x8, %o1
171 SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x18)
172 add %o1, 32, %o1
173 LOAD(prefetch, %o1, #one_read)
174 sub %o1, 32 - 8, %o1
175 SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x20)
176 add %o1, 8, %o1
177 SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x28)
178 add %o1, 8, %o1
179 SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x30)
180 add %o1, 8, %o1
181 SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x38)
182 subcc %o4, 64, %o4
183 bne,pt %XCC, 1b
184 add %o0, 64, %o0
185
186#undef SWIVEL_ONE_DWORD
187
188 srl %g1, 3, %g1
189 ba,pt %XCC, 60f
190 add %o1, %g1, %o1
191
19210: /* Destination is 64-byte aligned, source was only 8-byte
193 * aligned but it has been subtracted by 8 and we perform
194 * one twin load ahead, then add 8 back into source when
195 * we finish the loop.
196 */
197 EX_LD(LOAD_TWIN(%o1, %o4, %o5))
1981: add %o1, 16, %o1
199 EX_LD(LOAD_TWIN(%o1, %g2, %g3))
200 add %o1, 16 + 32, %o1
201 LOAD(prefetch, %o1, #one_read)
202 sub %o1, 32, %o1
203 EX_ST(STORE_INIT(%o5, %o0 + 0x00)) ! initializes cache line
204 EX_ST(STORE_INIT(%g2, %o0 + 0x08))
205 EX_LD(LOAD_TWIN(%o1, %o4, %o5))
206 add %o1, 16, %o1
207 EX_ST(STORE_INIT(%g3, %o0 + 0x10))
208 EX_ST(STORE_INIT(%o4, %o0 + 0x18))
209 EX_LD(LOAD_TWIN(%o1, %g2, %g3))
210 add %o1, 16, %o1
211 EX_ST(STORE_INIT(%o5, %o0 + 0x20))
212 EX_ST(STORE_INIT(%g2, %o0 + 0x28))
213 EX_LD(LOAD_TWIN(%o1, %o4, %o5))
214 EX_ST(STORE_INIT(%g3, %o0 + 0x30))
215 EX_ST(STORE_INIT(%o4, %o0 + 0x38))
216 subcc %g1, 64, %g1
217 bne,pt %XCC, 1b
218 add %o0, 64, %o0
219
220 ba,pt %XCC, 60f
221 add %o1, 0x8, %o1
222
22350: /* Destination is 64-byte aligned, and source is 16-byte
224 * aligned.
225 */
2261: EX_LD(LOAD_TWIN(%o1, %o4, %o5))
227 add %o1, 16, %o1
228 EX_LD(LOAD_TWIN(%o1, %g2, %g3))
229 add %o1, 16 + 32, %o1
230 LOAD(prefetch, %o1, #one_read)
231 sub %o1, 32, %o1
232 EX_ST(STORE_INIT(%o4, %o0 + 0x00)) ! initializes cache line
233 EX_ST(STORE_INIT(%o5, %o0 + 0x08))
234 EX_LD(LOAD_TWIN(%o1, %o4, %o5))
235 add %o1, 16, %o1
236 EX_ST(STORE_INIT(%g2, %o0 + 0x10))
237 EX_ST(STORE_INIT(%g3, %o0 + 0x18))
238 EX_LD(LOAD_TWIN(%o1, %g2, %g3))
239 add %o1, 16, %o1
240 EX_ST(STORE_INIT(%o4, %o0 + 0x20))
241 EX_ST(STORE_INIT(%o5, %o0 + 0x28))
242 EX_ST(STORE_INIT(%g2, %o0 + 0x30))
243 EX_ST(STORE_INIT(%g3, %o0 + 0x38))
244 subcc %g1, 64, %g1
245 bne,pt %XCC, 1b
246 add %o0, 64, %o0
247 /* fall through */
248
24960:
250 /* %o2 contains any final bytes still needed to be copied
251 * over. If anything is left, we copy it one byte at a time.
252 */
253 RESTORE_ASI(%o3)
254 brz,pt %o2, 85f
255 sub %o0, %o1, %o3
256 ba,a,pt %XCC, 90f
257
258 .align 64
25970: /* 16 < len <= 64 */
260 bne,pn %XCC, 75f
261 sub %o0, %o1, %o3
262
26372:
264 andn %o2, 0xf, %o4
265 and %o2, 0xf, %o2
2661: subcc %o4, 0x10, %o4
267 EX_LD(LOAD(ldx, %o1, %o5))
268 add %o1, 0x08, %o1
269 EX_LD(LOAD(ldx, %o1, %g1))
270 sub %o1, 0x08, %o1
271 EX_ST(STORE(stx, %o5, %o1 + %o3))
272 add %o1, 0x8, %o1
273 EX_ST(STORE(stx, %g1, %o1 + %o3))
274 bgu,pt %XCC, 1b
275 add %o1, 0x8, %o1
27673: andcc %o2, 0x8, %g0
277 be,pt %XCC, 1f
278 nop
279 sub %o2, 0x8, %o2
280 EX_LD(LOAD(ldx, %o1, %o5))
281 EX_ST(STORE(stx, %o5, %o1 + %o3))
282 add %o1, 0x8, %o1
2831: andcc %o2, 0x4, %g0
284 be,pt %XCC, 1f
285 nop
286 sub %o2, 0x4, %o2
287 EX_LD(LOAD(lduw, %o1, %o5))
288 EX_ST(STORE(stw, %o5, %o1 + %o3))
289 add %o1, 0x4, %o1
2901: cmp %o2, 0
291 be,pt %XCC, 85f
292 nop
293 ba,pt %xcc, 90f
294 nop
295
29675:
297 andcc %o0, 0x7, %g1
298 sub %g1, 0x8, %g1
299 be,pn %icc, 2f
300 sub %g0, %g1, %g1
301 sub %o2, %g1, %o2
302
3031: subcc %g1, 1, %g1
304 EX_LD(LOAD(ldub, %o1, %o5))
305 EX_ST(STORE(stb, %o5, %o1 + %o3))
306 bgu,pt %icc, 1b
307 add %o1, 1, %o1
308
3092: add %o1, %o3, %o0
310 andcc %o1, 0x7, %g1
311 bne,pt %icc, 8f
312 sll %g1, 3, %g1
313
314 cmp %o2, 16
315 bgeu,pt %icc, 72b
316 nop
317 ba,a,pt %xcc, 73b
318
3198: mov 64, %o3
320 andn %o1, 0x7, %o1
321 EX_LD(LOAD(ldx, %o1, %g2))
322 sub %o3, %g1, %o3
323 andn %o2, 0x7, %o4
324 sllx %g2, %g1, %g2
3251: add %o1, 0x8, %o1
326 EX_LD(LOAD(ldx, %o1, %g3))
327 subcc %o4, 0x8, %o4
328 srlx %g3, %o3, %o5
329 or %o5, %g2, %o5
330 EX_ST(STORE(stx, %o5, %o0))
331 add %o0, 0x8, %o0
332 bgu,pt %icc, 1b
333 sllx %g3, %g1, %g2
334
335 srl %g1, 3, %g1
336 andcc %o2, 0x7, %o2
337 be,pn %icc, 85f
338 add %o1, %g1, %o1
339 ba,pt %xcc, 90f
340 sub %o0, %o1, %o3
341
342 .align 64
34380: /* 0 < len <= 16 */
344 andcc %o3, 0x3, %g0
345 bne,pn %XCC, 90f
346 sub %o0, %o1, %o3
347
3481:
349 subcc %o2, 4, %o2
350 EX_LD(LOAD(lduw, %o1, %g1))
351 EX_ST(STORE(stw, %g1, %o1 + %o3))
352 bgu,pt %XCC, 1b
353 add %o1, 4, %o1
354
35585: retl
356 mov EX_RETVAL(GLOBAL_SPARE), %o0
357
358 .align 32
35990:
360 subcc %o2, 1, %o2
361 EX_LD(LOAD(ldub, %o1, %g1))
362 EX_ST(STORE(stb, %g1, %o1 + %o3))
363 bgu,pt %XCC, 90b
364 add %o1, 1, %o1
365 retl
366 mov EX_RETVAL(GLOBAL_SPARE), %o0
367
368 .size FUNC_NAME, .-FUNC_NAME
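
When the source is not even 8-byte aligned, the swivel loop above performs the shift-and-mask splice described in the comment: each destination dword is assembled from two neighbouring aligned source dwords. A plain C rendering of that idea, assuming a big-endian machine and a non-zero misalignment as the assembly does, looks like this (names are illustrative):

#include <stddef.h>
#include <stdint.h>

/* Illustrative sketch of the integer "faligndata" splice.  Requires an
 * 8-byte aligned dst and a source misalignment of 1..7 bytes; the fully
 * aligned case never reaches this path in NGmemcpy either.
 */
static void splice_copy_sketch(uint64_t *dst, const unsigned char *src, size_t ndwords)
{
	unsigned int off = (uintptr_t)src & 7;
	const uint64_t *s = (const uint64_t *)((uintptr_t)src & ~(uintptr_t)7);
	unsigned int lshift = off * 8, rshift = 64 - lshift;
	uint64_t prev = *s++ << lshift;		/* leading bytes of the stream */

	while (ndwords--) {
		uint64_t cur = *s++;
		*dst++ = prev | (cur >> rshift);	/* splice two aligned reads */
		prev = cur << lshift;
	}
}
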
diff --git a/arch/sparc64/lib/NGpage.S b/arch/sparc64/lib/NGpage.S
new file mode 100644
index 000000000000..7d7c3bb8dcbf
--- /dev/null
+++ b/arch/sparc64/lib/NGpage.S
@@ -0,0 +1,96 @@
  1/* NGpage.S: Niagara optimized clear and copy page.
2 *
3 * Copyright (C) 2006 (davem@davemloft.net)
4 */
5
6#include <asm/asi.h>
7#include <asm/page.h>
8
9 .text
10 .align 32
11
12 /* This is heavily simplified from the sun4u variants
13 * because Niagara does not have any D-cache aliasing issues
14 * and also we don't need to use the FPU in order to implement
15 * an optimal page copy/clear.
16 */
17
18NGcopy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */
19 prefetch [%o1 + 0x00], #one_read
20 mov 8, %g1
21 mov 16, %g2
22 mov 24, %g3
23 set PAGE_SIZE, %g7
24
251: ldda [%o1 + %g0] ASI_BLK_INIT_QUAD_LDD_P, %o2
26 ldda [%o1 + %g2] ASI_BLK_INIT_QUAD_LDD_P, %o4
27 prefetch [%o1 + 0x40], #one_read
28 add %o1, 32, %o1
29 stxa %o2, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P
30 stxa %o3, [%o0 + %g1] ASI_BLK_INIT_QUAD_LDD_P
31 ldda [%o1 + %g0] ASI_BLK_INIT_QUAD_LDD_P, %o2
32 stxa %o4, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P
33 stxa %o5, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P
34 ldda [%o1 + %g2] ASI_BLK_INIT_QUAD_LDD_P, %o4
35 add %o1, 32, %o1
36 add %o0, 32, %o0
37 stxa %o2, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P
38 stxa %o3, [%o0 + %g1] ASI_BLK_INIT_QUAD_LDD_P
39 stxa %o4, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P
40 stxa %o5, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P
41 subcc %g7, 64, %g7
42 bne,pt %xcc, 1b
43 add %o0, 32, %o0
44 retl
45 nop
46
47NGclear_page: /* %o0=dest */
48NGclear_user_page: /* %o0=dest, %o1=vaddr */
49 mov 8, %g1
50 mov 16, %g2
51 mov 24, %g3
52 set PAGE_SIZE, %g7
53
541: stxa %g0, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P
55 stxa %g0, [%o0 + %g1] ASI_BLK_INIT_QUAD_LDD_P
56 stxa %g0, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P
57 stxa %g0, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P
58 add %o0, 32, %o0
59 stxa %g0, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P
60 stxa %g0, [%o0 + %g1] ASI_BLK_INIT_QUAD_LDD_P
61 stxa %g0, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P
62 stxa %g0, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P
63 subcc %g7, 64, %g7
64 bne,pt %xcc, 1b
65 add %o0, 32, %o0
66 retl
67 nop
68
69#define BRANCH_ALWAYS 0x10680000
70#define NOP 0x01000000
71#define NG_DO_PATCH(OLD, NEW) \
72 sethi %hi(NEW), %g1; \
73 or %g1, %lo(NEW), %g1; \
74 sethi %hi(OLD), %g2; \
75 or %g2, %lo(OLD), %g2; \
76 sub %g1, %g2, %g1; \
77 sethi %hi(BRANCH_ALWAYS), %g3; \
78 sll %g1, 11, %g1; \
79 srl %g1, 11 + 2, %g1; \
80 or %g3, %lo(BRANCH_ALWAYS), %g3; \
81 or %g3, %g1, %g3; \
82 stw %g3, [%g2]; \
83 sethi %hi(NOP), %g3; \
84 or %g3, %lo(NOP), %g3; \
85 stw %g3, [%g2 + 0x4]; \
86 flush %g2;
87
88 .globl niagara_patch_pageops
89 .type niagara_patch_pageops,#function
90niagara_patch_pageops:
91 NG_DO_PATCH(copy_user_page, NGcopy_user_page)
92 NG_DO_PATCH(_clear_page, NGclear_page)
93 NG_DO_PATCH(clear_user_page, NGclear_user_page)
94 retl
95 nop
96 .size niagara_patch_pageops,.-niagara_patch_pageops
diff --git a/arch/sparc64/lib/NGpatch.S b/arch/sparc64/lib/NGpatch.S
new file mode 100644
index 000000000000..3b0674fc3366
--- /dev/null
+++ b/arch/sparc64/lib/NGpatch.S
@@ -0,0 +1,33 @@
1/* NGpatch.S: Patch Ultra-I routines with Niagara variant.
2 *
3 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
4 */
5
6#define BRANCH_ALWAYS 0x10680000
7#define NOP 0x01000000
8#define NG_DO_PATCH(OLD, NEW) \
9 sethi %hi(NEW), %g1; \
10 or %g1, %lo(NEW), %g1; \
11 sethi %hi(OLD), %g2; \
12 or %g2, %lo(OLD), %g2; \
13 sub %g1, %g2, %g1; \
14 sethi %hi(BRANCH_ALWAYS), %g3; \
15 sll %g1, 11, %g1; \
16 srl %g1, 11 + 2, %g1; \
17 or %g3, %lo(BRANCH_ALWAYS), %g3; \
18 or %g3, %g1, %g3; \
19 stw %g3, [%g2]; \
20 sethi %hi(NOP), %g3; \
21 or %g3, %lo(NOP), %g3; \
22 stw %g3, [%g2 + 0x4]; \
23 flush %g2;
24
25 .globl niagara_patch_copyops
26 .type niagara_patch_copyops,#function
27niagara_patch_copyops:
28 NG_DO_PATCH(memcpy, NGmemcpy)
29 NG_DO_PATCH(___copy_from_user, NGcopy_from_user)
30 NG_DO_PATCH(___copy_to_user, NGcopy_to_user)
31 retl
32 nop
33 .size niagara_patch_copyops,.-niagara_patch_copyops
diff --git a/arch/sparc64/lib/U3patch.S b/arch/sparc64/lib/U3patch.S
index e2b6c5e4b95a..ecc302619a6e 100644
--- a/arch/sparc64/lib/U3patch.S
+++ b/arch/sparc64/lib/U3patch.S
@@ -12,7 +12,8 @@
12 or %g2, %lo(OLD), %g2; \ 12 or %g2, %lo(OLD), %g2; \
13 sub %g1, %g2, %g1; \ 13 sub %g1, %g2, %g1; \
14 sethi %hi(BRANCH_ALWAYS), %g3; \ 14 sethi %hi(BRANCH_ALWAYS), %g3; \
15 srl %g1, 2, %g1; \ 15 sll %g1, 11, %g1; \
16 srl %g1, 11 + 2, %g1; \
16 or %g3, %lo(BRANCH_ALWAYS), %g3; \ 17 or %g3, %lo(BRANCH_ALWAYS), %g3; \
17 or %g3, %g1, %g3; \ 18 or %g3, %g1, %g3; \
18 stw %g3, [%g2]; \ 19 stw %g3, [%g2]; \
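
The replacement of the single "srl %g1, 2" with the "sll 11" / "srl 11 + 2" pair confines the computed word displacement to the branch's 19-bit disp19 field, so large or negative offsets can no longer spill into the opcode bits. The equivalence, sketched in 32-bit C arithmetic:

#include <assert.h>
#include <stdint.h>

/* Illustrative sketch: the two-shift sequence is just a mask of the
 * word displacement to 19 bits.
 */
static uint32_t disp19_sketch(uint32_t byte_delta)
{
	uint32_t via_shifts = (byte_delta << 11) >> 13;
	uint32_t via_mask   = (byte_delta >> 2) & 0x7ffff;

	assert(via_shifts == via_mask);
	return via_mask;
}
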
diff --git a/arch/sparc64/lib/bzero.S b/arch/sparc64/lib/bzero.S
index 1d2abcfa4e52..c7bbae8c590f 100644
--- a/arch/sparc64/lib/bzero.S
+++ b/arch/sparc64/lib/bzero.S
@@ -98,12 +98,12 @@ __bzero_done:
98 .text; \ 98 .text; \
99 .align 4; 99 .align 4;
100 100
101 .globl __bzero_noasi 101 .globl __clear_user
102 .type __bzero_noasi, #function 102 .type __clear_user, #function
103__bzero_noasi: /* %o0=buf, %o1=len */ 103__clear_user: /* %o0=buf, %o1=len */
104 brz,pn %o1, __bzero_noasi_done 104 brz,pn %o1, __clear_user_done
105 cmp %o1, 16 105 cmp %o1, 16
106 bl,pn %icc, __bzero_noasi_tiny 106 bl,pn %icc, __clear_user_tiny
107 EX_ST(prefetcha [%o0 + 0x00] %asi, #n_writes) 107 EX_ST(prefetcha [%o0 + 0x00] %asi, #n_writes)
108 andcc %o0, 0x3, %g0 108 andcc %o0, 0x3, %g0
109 be,pt %icc, 2f 109 be,pt %icc, 2f
@@ -145,14 +145,14 @@ __bzero_noasi: /* %o0=buf, %o1=len */
145 subcc %g1, 8, %g1 145 subcc %g1, 8, %g1
146 bne,pt %icc, 5b 146 bne,pt %icc, 5b
147 add %o0, 0x8, %o0 147 add %o0, 0x8, %o0
1486: brz,pt %o1, __bzero_noasi_done 1486: brz,pt %o1, __clear_user_done
149 nop 149 nop
150__bzero_noasi_tiny: 150__clear_user_tiny:
1511: EX_ST(stba %g0, [%o0 + 0x00] %asi) 1511: EX_ST(stba %g0, [%o0 + 0x00] %asi)
152 subcc %o1, 1, %o1 152 subcc %o1, 1, %o1
153 bne,pt %icc, 1b 153 bne,pt %icc, 1b
154 add %o0, 1, %o0 154 add %o0, 1, %o0
155__bzero_noasi_done: 155__clear_user_done:
156 retl 156 retl
157 clr %o0 157 clr %o0
158 .size __bzero_noasi, .-__bzero_noasi 158 .size __clear_user, .-__clear_user
diff --git a/arch/sparc64/lib/clear_page.S b/arch/sparc64/lib/clear_page.S
index b59884ef051d..77e531f6c2a7 100644
--- a/arch/sparc64/lib/clear_page.S
+++ b/arch/sparc64/lib/clear_page.S
@@ -9,6 +9,7 @@
9#include <asm/page.h> 9#include <asm/page.h>
10#include <asm/pgtable.h> 10#include <asm/pgtable.h>
11#include <asm/spitfire.h> 11#include <asm/spitfire.h>
12#include <asm/head.h>
12 13
13 /* What we used to do was lock a TLB entry into a specific 14 /* What we used to do was lock a TLB entry into a specific
14 * TLB slot, clear the page with interrupts disabled, then 15 * TLB slot, clear the page with interrupts disabled, then
@@ -22,9 +23,6 @@
22 * disable preemption during the clear. 23 * disable preemption during the clear.
23 */ 24 */
24 25
25#define TTE_BITS_TOP (_PAGE_VALID | _PAGE_SZBITS)
26#define TTE_BITS_BOTTOM (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W)
27
28 .text 26 .text
29 27
30 .globl _clear_page 28 .globl _clear_page
@@ -43,12 +41,11 @@ clear_user_page: /* %o0=dest, %o1=vaddr */
43 sethi %hi(PAGE_SIZE), %o4 41 sethi %hi(PAGE_SIZE), %o4
44 42
45 sllx %g2, 32, %g2 43 sllx %g2, 32, %g2
46 sethi %uhi(TTE_BITS_TOP), %g3 44 sethi %hi(PAGE_KERNEL_LOCKED), %g3
47 45
48 sllx %g3, 32, %g3 46 ldx [%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3
49 sub %o0, %g2, %g1 ! paddr 47 sub %o0, %g2, %g1 ! paddr
50 48
51 or %g3, TTE_BITS_BOTTOM, %g3
52 and %o1, %o4, %o0 ! vaddr D-cache alias bit 49 and %o1, %o4, %o0 ! vaddr D-cache alias bit
53 50
54 or %g1, %g3, %g1 ! TTE data 51 or %g1, %g3, %g1 ! TTE data
@@ -66,7 +63,8 @@ clear_user_page: /* %o0=dest, %o1=vaddr */
66 wrpr %o4, PSTATE_IE, %pstate 63 wrpr %o4, PSTATE_IE, %pstate
67 stxa %o0, [%g3] ASI_DMMU 64 stxa %o0, [%g3] ASI_DMMU
68 stxa %g1, [%g0] ASI_DTLB_DATA_IN 65 stxa %g1, [%g0] ASI_DTLB_DATA_IN
69 flush %g6 66 sethi %hi(KERNBASE), %g1
67 flush %g1
70 wrpr %o4, 0x0, %pstate 68 wrpr %o4, 0x0, %pstate
71 69
72 mov 1, %o4 70 mov 1, %o4
diff --git a/arch/sparc64/lib/copy_page.S b/arch/sparc64/lib/copy_page.S
index feebb14fd27a..37460666a5c3 100644
--- a/arch/sparc64/lib/copy_page.S
+++ b/arch/sparc64/lib/copy_page.S
@@ -23,8 +23,6 @@
23 * disable preemption during the clear. 23 * disable preemption during the clear.
24 */ 24 */
25 25
26#define TTE_BITS_TOP (_PAGE_VALID | _PAGE_SZBITS)
27#define TTE_BITS_BOTTOM (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W)
28#define DCACHE_SIZE (PAGE_SIZE * 2) 26#define DCACHE_SIZE (PAGE_SIZE * 2)
29 27
30#if (PAGE_SHIFT == 13) || (PAGE_SHIFT == 19) 28#if (PAGE_SHIFT == 13) || (PAGE_SHIFT == 19)
@@ -52,13 +50,12 @@ copy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */
52 sethi %hi(PAGE_SIZE), %o3 50 sethi %hi(PAGE_SIZE), %o3
53 51
54 sllx %g2, 32, %g2 52 sllx %g2, 32, %g2
55 sethi %uhi(TTE_BITS_TOP), %g3 53 sethi %hi(PAGE_KERNEL_LOCKED), %g3
56 54
57 sllx %g3, 32, %g3 55 ldx [%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3
58 sub %o0, %g2, %g1 ! dest paddr 56 sub %o0, %g2, %g1 ! dest paddr
59 57
60 sub %o1, %g2, %g2 ! src paddr 58 sub %o1, %g2, %g2 ! src paddr
61 or %g3, TTE_BITS_BOTTOM, %g3
62 59
63 and %o2, %o3, %o0 ! vaddr D-cache alias bit 60 and %o2, %o3, %o0 ! vaddr D-cache alias bit
64 or %g1, %g3, %g1 ! dest TTE data 61 or %g1, %g3, %g1 ! dest TTE data
diff --git a/arch/sparc64/lib/delay.c b/arch/sparc64/lib/delay.c
index e8808727617a..fb27e54a03ee 100644
--- a/arch/sparc64/lib/delay.c
+++ b/arch/sparc64/lib/delay.c
@@ -1,6 +1,6 @@
1/* delay.c: Delay loops for sparc64 1/* delay.c: Delay loops for sparc64
2 * 2 *
3 * Copyright (C) 2004 David S. Miller <davem@redhat.com> 3 * Copyright (C) 2004, 2006 David S. Miller <davem@davemloft.net>
4 * 4 *
5 * Based heavily upon x86 variant which is: 5 * Based heavily upon x86 variant which is:
6 * Copyright (C) 1993 Linus Torvalds 6 * Copyright (C) 1993 Linus Torvalds
@@ -8,19 +8,16 @@
8 */ 8 */
9 9
10#include <linux/delay.h> 10#include <linux/delay.h>
11#include <asm/timer.h>
11 12
12void __delay(unsigned long loops) 13void __delay(unsigned long loops)
13{ 14{
14 __asm__ __volatile__( 15 unsigned long bclock, now;
15" b,pt %%xcc, 1f\n" 16
16" cmp %0, 0\n" 17 bclock = tick_ops->get_tick();
17" .align 32\n" 18 do {
18"1:\n" 19 now = tick_ops->get_tick();
19" bne,pt %%xcc, 1b\n" 20 } while ((now-bclock) < loops);
20" subcc %0, 1, %0\n"
21 : "=&r" (loops)
22 : "0" (loops)
23 : "cc");
24} 21}
25 22
26/* We used to multiply by HZ after shifting down by 32 bits 23/* We used to multiply by HZ after shifting down by 32 bits
diff --git a/arch/sparc64/lib/xor.S b/arch/sparc64/lib/xor.S
index 4cd5d2be1ae1..a79c8888170d 100644
--- a/arch/sparc64/lib/xor.S
+++ b/arch/sparc64/lib/xor.S
@@ -2,9 +2,10 @@
2 * arch/sparc64/lib/xor.S 2 * arch/sparc64/lib/xor.S
3 * 3 *
4 * High speed xor_block operation for RAID4/5 utilizing the 4 * High speed xor_block operation for RAID4/5 utilizing the
5 * UltraSparc Visual Instruction Set. 5 * UltraSparc Visual Instruction Set and Niagara store-init/twin-load.
6 * 6 *
7 * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz) 7 * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
8 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
8 */ 9 */
9 10
10#include <asm/visasm.h> 11#include <asm/visasm.h>
@@ -19,6 +20,8 @@
19 */ 20 */
20 .text 21 .text
21 .align 32 22 .align 32
23
24 /* VIS versions. */
22 .globl xor_vis_2 25 .globl xor_vis_2
23 .type xor_vis_2,#function 26 .type xor_vis_2,#function
24xor_vis_2: 27xor_vis_2:
@@ -352,3 +355,298 @@ xor_vis_5:
352 ret 355 ret
353 restore 356 restore
354 .size xor_vis_5, .-xor_vis_5 357 .size xor_vis_5, .-xor_vis_5
358
359 /* Niagara versions. */
360 .globl xor_niagara_2
361 .type xor_niagara_2,#function
362xor_niagara_2: /* %o0=bytes, %o1=dest, %o2=src */
363 save %sp, -192, %sp
364 prefetch [%i1], #n_writes
365 prefetch [%i2], #one_read
366 rd %asi, %g7
367 wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi
368 srlx %i0, 6, %g1
369 mov %i1, %i0
370 mov %i2, %i1
3711: ldda [%i1 + 0x00] %asi, %i2 /* %i2/%i3 = src + 0x00 */
372 ldda [%i1 + 0x10] %asi, %i4 /* %i4/%i5 = src + 0x10 */
373 ldda [%i1 + 0x20] %asi, %g2 /* %g2/%g3 = src + 0x20 */
374 ldda [%i1 + 0x30] %asi, %l0 /* %l0/%l1 = src + 0x30 */
375 prefetch [%i1 + 0x40], #one_read
376 ldda [%i0 + 0x00] %asi, %o0 /* %o0/%o1 = dest + 0x00 */
377 ldda [%i0 + 0x10] %asi, %o2 /* %o2/%o3 = dest + 0x10 */
378 ldda [%i0 + 0x20] %asi, %o4 /* %o4/%o5 = dest + 0x20 */
379 ldda [%i0 + 0x30] %asi, %l2 /* %l2/%l3 = dest + 0x30 */
380 prefetch [%i0 + 0x40], #n_writes
381 xor %o0, %i2, %o0
382 xor %o1, %i3, %o1
383 stxa %o0, [%i0 + 0x00] %asi
384 stxa %o1, [%i0 + 0x08] %asi
385 xor %o2, %i4, %o2
386 xor %o3, %i5, %o3
387 stxa %o2, [%i0 + 0x10] %asi
388 stxa %o3, [%i0 + 0x18] %asi
389 xor %o4, %g2, %o4
390 xor %o5, %g3, %o5
391 stxa %o4, [%i0 + 0x20] %asi
392 stxa %o5, [%i0 + 0x28] %asi
393 xor %l2, %l0, %l2
394 xor %l3, %l1, %l3
395 stxa %l2, [%i0 + 0x30] %asi
396 stxa %l3, [%i0 + 0x38] %asi
397 add %i0, 0x40, %i0
398 subcc %g1, 1, %g1
399 bne,pt %xcc, 1b
400 add %i1, 0x40, %i1
401 membar #Sync
402 wr %g7, 0x0, %asi
403 ret
404 restore
405 .size xor_niagara_2, .-xor_niagara_2
406
407 .globl xor_niagara_3
408 .type xor_niagara_3,#function
409xor_niagara_3: /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2 */
410 save %sp, -192, %sp
411 prefetch [%i1], #n_writes
412 prefetch [%i2], #one_read
413 prefetch [%i3], #one_read
414 rd %asi, %g7
415 wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi
416 srlx %i0, 6, %g1
417 mov %i1, %i0
418 mov %i2, %i1
419 mov %i3, %l7
4201: ldda [%i1 + 0x00] %asi, %i2 /* %i2/%i3 = src1 + 0x00 */
421 ldda [%i1 + 0x10] %asi, %i4 /* %i4/%i5 = src1 + 0x10 */
422 ldda [%l7 + 0x00] %asi, %g2 /* %g2/%g3 = src2 + 0x00 */
423 ldda [%l7 + 0x10] %asi, %l0 /* %l0/%l1 = src2 + 0x10 */
424 ldda [%i0 + 0x00] %asi, %o0 /* %o0/%o1 = dest + 0x00 */
425 ldda [%i0 + 0x10] %asi, %o2 /* %o2/%o3 = dest + 0x10 */
426 xor %g2, %i2, %g2
427 xor %g3, %i3, %g3
428 xor %o0, %g2, %o0
429 xor %o1, %g3, %o1
430 stxa %o0, [%i0 + 0x00] %asi
431 stxa %o1, [%i0 + 0x08] %asi
432 ldda [%i1 + 0x20] %asi, %i2 /* %i2/%i3 = src1 + 0x20 */
433 ldda [%l7 + 0x20] %asi, %g2 /* %g2/%g3 = src2 + 0x20 */
434 ldda [%i0 + 0x20] %asi, %o0 /* %o0/%o1 = dest + 0x20 */
435 xor %l0, %i4, %l0
436 xor %l1, %i5, %l1
437 xor %o2, %l0, %o2
438 xor %o3, %l1, %o3
439 stxa %o2, [%i0 + 0x10] %asi
440 stxa %o3, [%i0 + 0x18] %asi
441 ldda [%i1 + 0x30] %asi, %i4 /* %i4/%i5 = src1 + 0x30 */
442 ldda [%l7 + 0x30] %asi, %l0 /* %l0/%l1 = src2 + 0x30 */
443 ldda [%i0 + 0x30] %asi, %o2 /* %o2/%o3 = dest + 0x30 */
444 prefetch [%i1 + 0x40], #one_read
445 prefetch [%l7 + 0x40], #one_read
446 prefetch [%i0 + 0x40], #n_writes
447 xor %g2, %i2, %g2
448 xor %g3, %i3, %g3
449 xor %o0, %g2, %o0
450 xor %o1, %g3, %o1
451 stxa %o0, [%i0 + 0x20] %asi
452 stxa %o1, [%i0 + 0x28] %asi
453 xor %l0, %i4, %l0
454 xor %l1, %i5, %l1
455 xor %o2, %l0, %o2
456 xor %o3, %l1, %o3
457 stxa %o2, [%i0 + 0x30] %asi
458 stxa %o3, [%i0 + 0x38] %asi
459 add %i0, 0x40, %i0
460 add %i1, 0x40, %i1
461 subcc %g1, 1, %g1
462 bne,pt %xcc, 1b
463 add %l7, 0x40, %l7
464 membar #Sync
465 wr %g7, 0x0, %asi
466 ret
467 restore
468 .size xor_niagara_3, .-xor_niagara_3
469
470 .globl xor_niagara_4
471 .type xor_niagara_4,#function
472xor_niagara_4: /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */
473 save %sp, -192, %sp
474 prefetch [%i1], #n_writes
475 prefetch [%i2], #one_read
476 prefetch [%i3], #one_read
477 prefetch [%i4], #one_read
478 rd %asi, %g7
479 wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi
480 srlx %i0, 6, %g1
481 mov %i1, %i0
482 mov %i2, %i1
483 mov %i3, %l7
484 mov %i4, %l6
4851: ldda [%i1 + 0x00] %asi, %i2 /* %i2/%i3 = src1 + 0x00 */
486 ldda [%l7 + 0x00] %asi, %i4 /* %i4/%i5 = src2 + 0x00 */
487 ldda [%l6 + 0x00] %asi, %g2 /* %g2/%g3 = src3 + 0x00 */
488 ldda [%i0 + 0x00] %asi, %l0 /* %l0/%l1 = dest + 0x00 */
489 xor %i4, %i2, %i4
490 xor %i5, %i3, %i5
491 ldda [%i1 + 0x10] %asi, %i2 /* %i2/%i3 = src1 + 0x10 */
492 xor %g2, %i4, %g2
493 xor %g3, %i5, %g3
494 ldda [%l7 + 0x10] %asi, %i4 /* %i4/%i5 = src2 + 0x10 */
495 xor %l0, %g2, %l0
496 xor %l1, %g3, %l1
497 stxa %l0, [%i0 + 0x00] %asi
498 stxa %l1, [%i0 + 0x08] %asi
499 ldda [%l6 + 0x10] %asi, %g2 /* %g2/%g3 = src3 + 0x10 */
500 ldda [%i0 + 0x10] %asi, %l0 /* %l0/%l1 = dest + 0x10 */
501
502 xor %i4, %i2, %i4
503 xor %i5, %i3, %i5
504 ldda [%i1 + 0x20] %asi, %i2 /* %i2/%i3 = src1 + 0x20 */
505 xor %g2, %i4, %g2
506 xor %g3, %i5, %g3
507 ldda [%l7 + 0x20] %asi, %i4 /* %i4/%i5 = src2 + 0x20 */
508 xor %l0, %g2, %l0
509 xor %l1, %g3, %l1
510 stxa %l0, [%i0 + 0x10] %asi
511 stxa %l1, [%i0 + 0x18] %asi
512 ldda [%l6 + 0x20] %asi, %g2 /* %g2/%g3 = src3 + 0x20 */
513 ldda [%i0 + 0x20] %asi, %l0 /* %l0/%l1 = dest + 0x20 */
514
515 xor %i4, %i2, %i4
516 xor %i5, %i3, %i5
517 ldda [%i1 + 0x30] %asi, %i2 /* %i2/%i3 = src1 + 0x30 */
518 xor %g2, %i4, %g2
519 xor %g3, %i5, %g3
520 ldda [%l7 + 0x30] %asi, %i4 /* %i4/%i5 = src2 + 0x30 */
521 xor %l0, %g2, %l0
522 xor %l1, %g3, %l1
523 stxa %l0, [%i0 + 0x20] %asi
524 stxa %l1, [%i0 + 0x28] %asi
525 ldda [%l6 + 0x30] %asi, %g2 /* %g2/%g3 = src3 + 0x30 */
526 ldda [%i0 + 0x30] %asi, %l0 /* %l0/%l1 = dest + 0x30 */
527
528 prefetch [%i1 + 0x40], #one_read
529 prefetch [%l7 + 0x40], #one_read
530 prefetch [%l6 + 0x40], #one_read
531 prefetch [%i0 + 0x40], #n_writes
532
533 xor %i4, %i2, %i4
534 xor %i5, %i3, %i5
535 xor %g2, %i4, %g2
536 xor %g3, %i5, %g3
537 xor %l0, %g2, %l0
538 xor %l1, %g3, %l1
539 stxa %l0, [%i0 + 0x30] %asi
540 stxa %l1, [%i0 + 0x38] %asi
541
542 add %i0, 0x40, %i0
543 add %i1, 0x40, %i1
544 add %l7, 0x40, %l7
545 subcc %g1, 1, %g1
546 bne,pt %xcc, 1b
547 add %l6, 0x40, %l6
548 membar #Sync
549 wr %g7, 0x0, %asi
550 ret
551 restore
552 .size xor_niagara_4, .-xor_niagara_4
553
554 .globl xor_niagara_5
555 .type xor_niagara_5,#function
556xor_niagara_5: /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3, %o5=src4 */
557 save %sp, -192, %sp
558 prefetch [%i1], #n_writes
559 prefetch [%i2], #one_read
560 prefetch [%i3], #one_read
561 prefetch [%i4], #one_read
562 prefetch [%i5], #one_read
563 rd %asi, %g7
564 wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi
565 srlx %i0, 6, %g1
566 mov %i1, %i0
567 mov %i2, %i1
568 mov %i3, %l7
569 mov %i4, %l6
570 mov %i5, %l5
5711: ldda [%i1 + 0x00] %asi, %i2 /* %i2/%i3 = src1 + 0x00 */
572 ldda [%l7 + 0x00] %asi, %i4 /* %i4/%i5 = src2 + 0x00 */
573 ldda [%l6 + 0x00] %asi, %g2 /* %g2/%g3 = src3 + 0x00 */
574 ldda [%l5 + 0x00] %asi, %l0 /* %l0/%l1 = src4 + 0x00 */
575 ldda [%i0 + 0x00] %asi, %l2 /* %l2/%l3 = dest + 0x00 */
576 xor %i4, %i2, %i4
577 xor %i5, %i3, %i5
578 ldda [%i1 + 0x10] %asi, %i2 /* %i2/%i3 = src1 + 0x10 */
579 xor %g2, %i4, %g2
580 xor %g3, %i5, %g3
581 ldda [%l7 + 0x10] %asi, %i4 /* %i4/%i5 = src2 + 0x10 */
582 xor %l0, %g2, %l0
583 xor %l1, %g3, %l1
584 ldda [%l6 + 0x10] %asi, %g2 /* %g2/%g3 = src3 + 0x10 */
585 xor %l2, %l0, %l2
586 xor %l3, %l1, %l3
587 stxa %l2, [%i0 + 0x00] %asi
588 stxa %l3, [%i0 + 0x08] %asi
589 ldda [%l5 + 0x10] %asi, %l0 /* %l0/%l1 = src4 + 0x10 */
590 ldda [%i0 + 0x10] %asi, %l2 /* %l2/%l3 = dest + 0x10 */
591
592 xor %i4, %i2, %i4
593 xor %i5, %i3, %i5
594 ldda [%i1 + 0x20] %asi, %i2 /* %i2/%i3 = src1 + 0x20 */
595 xor %g2, %i4, %g2
596 xor %g3, %i5, %g3
597 ldda [%l7 + 0x20] %asi, %i4 /* %i4/%i5 = src2 + 0x20 */
598 xor %l0, %g2, %l0
599 xor %l1, %g3, %l1
600 ldda [%l6 + 0x20] %asi, %g2 /* %g2/%g3 = src3 + 0x20 */
601 xor %l2, %l0, %l2
602 xor %l3, %l1, %l3
603 stxa %l2, [%i0 + 0x10] %asi
604 stxa %l3, [%i0 + 0x18] %asi
605 ldda [%l5 + 0x20] %asi, %l0 /* %l0/%l1 = src4 + 0x20 */
606 ldda [%i0 + 0x20] %asi, %l2 /* %l2/%l3 = dest + 0x20 */
607
608 xor %i4, %i2, %i4
609 xor %i5, %i3, %i5
610 ldda [%i1 + 0x30] %asi, %i2 /* %i2/%i3 = src1 + 0x30 */
611 xor %g2, %i4, %g2
612 xor %g3, %i5, %g3
613 ldda [%l7 + 0x30] %asi, %i4 /* %i4/%i5 = src2 + 0x30 */
614 xor %l0, %g2, %l0
615 xor %l1, %g3, %l1
616 ldda [%l6 + 0x30] %asi, %g2 /* %g2/%g3 = src3 + 0x30 */
617 xor %l2, %l0, %l2
618 xor %l3, %l1, %l3
619 stxa %l2, [%i0 + 0x20] %asi
620 stxa %l3, [%i0 + 0x28] %asi
621 ldda [%l5 + 0x30] %asi, %l0 /* %l0/%l1 = src4 + 0x30 */
622 ldda [%i0 + 0x30] %asi, %l2 /* %l2/%l3 = dest + 0x30 */
623
624 prefetch [%i1 + 0x40], #one_read
625 prefetch [%l7 + 0x40], #one_read
626 prefetch [%l6 + 0x40], #one_read
627 prefetch [%l5 + 0x40], #one_read
628 prefetch [%i0 + 0x40], #n_writes
629
630 xor %i4, %i2, %i4
631 xor %i5, %i3, %i5
632 xor %g2, %i4, %g2
633 xor %g3, %i5, %g3
634 xor %l0, %g2, %l0
635 xor %l1, %g3, %l1
636 xor %l2, %l0, %l2
637 xor %l3, %l1, %l3
638 stxa %l2, [%i0 + 0x30] %asi
639 stxa %l3, [%i0 + 0x38] %asi
640
641 add %i0, 0x40, %i0
642 add %i1, 0x40, %i1
643 add %l7, 0x40, %l7
644 add %l6, 0x40, %l6
645 subcc %g1, 1, %g1
646 bne,pt %xcc, 1b
647 add %l5, 0x40, %l5
648 membar #Sync
649 wr %g7, 0x0, %asi
650 ret
651 restore
652 .size xor_niagara_5, .-xor_niagara_5
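
Each xor_niagara_N loop retires one 64-byte block per iteration (the byte count is shifted right by six up front) and stores through the block-init ASI so destination lines need not be read before being fully overwritten. Stripped of the ASI handling, the two-source case reduces to the sketch below.

/* Illustrative sketch: the plain-C equivalent of one xor_niagara_2
 * iteration over a 64-byte (eight dword) block.
 */
static void xor_block_64_sketch(unsigned long *dest, const unsigned long *src)
{
	int i;

	for (i = 0; i < 8; i++)
		dest[i] ^= src[i];	/* real code: ldda/stxa via ASI_BLK_INIT_QUAD_LDD_P */
}
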
diff --git a/arch/sparc64/math-emu/math.c b/arch/sparc64/math-emu/math.c
index 2ae05cd7b773..6ee496c2864a 100644
--- a/arch/sparc64/math-emu/math.c
+++ b/arch/sparc64/math-emu/math.c
@@ -206,9 +206,29 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
206 case FSTOQ: TYPE(3,3,1,1,1,0,0); break; 206 case FSTOQ: TYPE(3,3,1,1,1,0,0); break;
207 case FDTOQ: TYPE(3,3,1,2,1,0,0); break; 207 case FDTOQ: TYPE(3,3,1,2,1,0,0); break;
208 case FQTOI: TYPE(3,1,0,3,1,0,0); break; 208 case FQTOI: TYPE(3,1,0,3,1,0,0); break;
209
210 /* We can get either unimplemented or unfinished
211 * for these cases. Pre-Niagara systems generate
212 * unfinished fpop for SUBNORMAL cases, and Niagara
213 * always gives unimplemented fpop for fsqrt{s,d}.
214 */
215 case FSQRTS: {
216 unsigned long x = current_thread_info()->xfsr[0];
217
218 x = (x >> 14) & 0xf;
219 TYPE(x,1,1,1,1,0,0);
220 break;
221 }
222
223 case FSQRTD: {
224 unsigned long x = current_thread_info()->xfsr[0];
225
226 x = (x >> 14) & 0xf;
227 TYPE(x,2,1,2,1,0,0);
228 break;
229 }
230
209 /* SUBNORMAL - ftt == 2 */ 231 /* SUBNORMAL - ftt == 2 */
210 case FSQRTS: TYPE(2,1,1,1,1,0,0); break;
211 case FSQRTD: TYPE(2,2,1,2,1,0,0); break;
212 case FADDD: 232 case FADDD:
213 case FSUBD: 233 case FSUBD:
214 case FMULD: 234 case FMULD:
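
The new FSQRT{S,D} cases key the emulation path off the trap type recorded in the thread's saved %fsr instead of assuming SUBNORMAL, because Niagara reports these instructions as unimplemented rather than unfinished. For reference, the architectural ftt field being consulted sits in FSR bits 16:14; a hypothetical helper that extracts just that field would be:

/* Illustrative sketch only: extract the 3-bit fp trap type from a saved
 * %fsr image.  2 = unfinished_FPop, 3 = unimplemented_FPop.
 */
static unsigned int fsr_ftt_sketch(unsigned long xfsr)
{
	return (xfsr >> 14) & 0x7;
}
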
diff --git a/arch/sparc64/mm/Makefile b/arch/sparc64/mm/Makefile
index 9d0960e69f48..e415bf942bcd 100644
--- a/arch/sparc64/mm/Makefile
+++ b/arch/sparc64/mm/Makefile
@@ -5,6 +5,6 @@
5EXTRA_AFLAGS := -ansi 5EXTRA_AFLAGS := -ansi
6EXTRA_CFLAGS := -Werror 6EXTRA_CFLAGS := -Werror
7 7
8obj-y := ultra.o tlb.o fault.o init.o generic.o 8obj-y := ultra.o tlb.o tsb.o fault.o init.o generic.o
9 9
10obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o 10obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index 6f0539aa44d0..63b6cc0cd5d5 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -29,6 +29,7 @@
29#include <asm/lsu.h> 29#include <asm/lsu.h>
30#include <asm/sections.h> 30#include <asm/sections.h>
31#include <asm/kdebug.h> 31#include <asm/kdebug.h>
32#include <asm/mmu_context.h>
32 33
33/* 34/*
34 * To debug kernel to catch accesses to certain virtual/physical addresses. 35 * To debug kernel to catch accesses to certain virtual/physical addresses.
@@ -91,12 +92,13 @@ static void __kprobes unhandled_fault(unsigned long address,
91 die_if_kernel("Oops", regs); 92 die_if_kernel("Oops", regs);
92} 93}
93 94
94static void bad_kernel_pc(struct pt_regs *regs) 95static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
95{ 96{
96 unsigned long *ksp; 97 unsigned long *ksp;
97 98
98 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n", 99 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
99 regs->tpc); 100 regs->tpc);
101 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
100 __asm__("mov %%sp, %0" : "=r" (ksp)); 102 __asm__("mov %%sp, %0" : "=r" (ksp));
101 show_stack(current, ksp); 103 show_stack(current, ksp);
102 unhandled_fault(regs->tpc, current, regs); 104 unhandled_fault(regs->tpc, current, regs);
@@ -137,7 +139,7 @@ static unsigned int get_user_insn(unsigned long tpc)
137 if (!pte_present(pte)) 139 if (!pte_present(pte))
138 goto out; 140 goto out;
139 141
140 pa = (pte_val(pte) & _PAGE_PADDR); 142 pa = (pte_pfn(pte) << PAGE_SHIFT);
141 pa += (tpc & ~PAGE_MASK); 143 pa += (tpc & ~PAGE_MASK);
142 144
143 /* Use phys bypass so we don't pollute dtlb/dcache. */ 145 /* Use phys bypass so we don't pollute dtlb/dcache. */
@@ -257,7 +259,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
257 struct vm_area_struct *vma; 259 struct vm_area_struct *vma;
258 unsigned int insn = 0; 260 unsigned int insn = 0;
259 int si_code, fault_code; 261 int si_code, fault_code;
260 unsigned long address; 262 unsigned long address, mm_rss;
261 263
262 fault_code = get_thread_fault_code(); 264 fault_code = get_thread_fault_code();
263 265
@@ -280,7 +282,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
280 (tpc >= MODULES_VADDR && tpc < MODULES_END)) { 282 (tpc >= MODULES_VADDR && tpc < MODULES_END)) {
281 /* Valid, no problems... */ 283 /* Valid, no problems... */
282 } else { 284 } else {
283 bad_kernel_pc(regs); 285 bad_kernel_pc(regs, address);
284 return; 286 return;
285 } 287 }
286 } 288 }
@@ -406,6 +408,11 @@ good_area:
406 } 408 }
407 409
408 up_read(&mm->mmap_sem); 410 up_read(&mm->mmap_sem);
411
412 mm_rss = get_mm_rss(mm);
413 if (unlikely(mm_rss >= mm->context.tsb_rss_limit))
414 tsb_grow(mm, mm_rss);
415
409 return; 416 return;
410 417
411 /* 418 /*
diff --git a/arch/sparc64/mm/generic.c b/arch/sparc64/mm/generic.c
index 580b63da836b..5fc5c579e35e 100644
--- a/arch/sparc64/mm/generic.c
+++ b/arch/sparc64/mm/generic.c
@@ -15,15 +15,6 @@
15#include <asm/page.h> 15#include <asm/page.h>
16#include <asm/tlbflush.h> 16#include <asm/tlbflush.h>
17 17
18static inline pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space)
19{
20 pte_t pte;
21 pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E) &
22 ~(unsigned long)_PAGE_CACHE);
23 pte_val(pte) |= (((unsigned long)space) << 32);
24 return pte;
25}
26
27/* Remap IO memory, the same way as remap_pfn_range(), but use 18/* Remap IO memory, the same way as remap_pfn_range(), but use
28 * the obio memory space. 19 * the obio memory space.
29 * 20 *
@@ -48,24 +39,29 @@ static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
48 pte_t entry; 39 pte_t entry;
49 unsigned long curend = address + PAGE_SIZE; 40 unsigned long curend = address + PAGE_SIZE;
50 41
51 entry = mk_pte_io(offset, prot, space); 42 entry = mk_pte_io(offset, prot, space, PAGE_SIZE);
52 if (!(address & 0xffff)) { 43 if (!(address & 0xffff)) {
53 if (!(address & 0x3fffff) && !(offset & 0x3ffffe) && end >= address + 0x400000) { 44 if (PAGE_SIZE < (4 * 1024 * 1024) &&
54 entry = mk_pte_io(offset, 45 !(address & 0x3fffff) &&
55 __pgprot(pgprot_val (prot) | _PAGE_SZ4MB), 46 !(offset & 0x3ffffe) &&
56 space); 47 end >= address + 0x400000) {
48 entry = mk_pte_io(offset, prot, space,
49 4 * 1024 * 1024);
57 curend = address + 0x400000; 50 curend = address + 0x400000;
58 offset += 0x400000; 51 offset += 0x400000;
59 } else if (!(address & 0x7ffff) && !(offset & 0x7fffe) && end >= address + 0x80000) { 52 } else if (PAGE_SIZE < (512 * 1024) &&
60 entry = mk_pte_io(offset, 53 !(address & 0x7ffff) &&
61 __pgprot(pgprot_val (prot) | _PAGE_SZ512K), 54 !(offset & 0x7fffe) &&
62 space); 55 end >= address + 0x80000) {
56 entry = mk_pte_io(offset, prot, space,
                                                                  57                           512 * 1024);
63 curend = address + 0x80000; 58 curend = address + 0x80000;
64 offset += 0x80000; 59 offset += 0x80000;
65 } else if (!(offset & 0xfffe) && end >= address + 0x10000) { 60 } else if (PAGE_SIZE < (64 * 1024) &&
66 entry = mk_pte_io(offset, 61 !(offset & 0xfffe) &&
67 __pgprot(pgprot_val (prot) | _PAGE_SZ64K), 62 end >= address + 0x10000) {
68 space); 63 entry = mk_pte_io(offset, prot, space,
64 64 * 1024);
69 curend = address + 0x10000; 65 curend = address + 0x10000;
70 offset += 0x10000; 66 offset += 0x10000;
71 } else 67 } else
diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c
index 625cbb336a23..a7a24869d045 100644
--- a/arch/sparc64/mm/hugetlbpage.c
+++ b/arch/sparc64/mm/hugetlbpage.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * SPARC64 Huge TLB page support. 2 * SPARC64 Huge TLB page support.
3 * 3 *
4 * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com) 4 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
5 */ 5 */
6 6
7#include <linux/config.h> 7#include <linux/config.h>
@@ -22,6 +22,175 @@
22#include <asm/cacheflush.h> 22#include <asm/cacheflush.h>
23#include <asm/mmu_context.h> 23#include <asm/mmu_context.h>
24 24
25/* Slightly simplified from the non-hugepage variant because by
26 * definition we don't have to worry about any page coloring stuff
27 */
28#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
29#define VA_EXCLUDE_END (0xfffff80000000000UL + (1UL << 32UL))
30
31static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
32 unsigned long addr,
33 unsigned long len,
34 unsigned long pgoff,
35 unsigned long flags)
36{
37 struct mm_struct *mm = current->mm;
38 struct vm_area_struct * vma;
39 unsigned long task_size = TASK_SIZE;
40 unsigned long start_addr;
41
42 if (test_thread_flag(TIF_32BIT))
43 task_size = STACK_TOP32;
44 if (unlikely(len >= VA_EXCLUDE_START))
45 return -ENOMEM;
46
47 if (len > mm->cached_hole_size) {
48 start_addr = addr = mm->free_area_cache;
49 } else {
50 start_addr = addr = TASK_UNMAPPED_BASE;
51 mm->cached_hole_size = 0;
52 }
53
54 task_size -= len;
55
56full_search:
57 addr = ALIGN(addr, HPAGE_SIZE);
58
59 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
60 /* At this point: (!vma || addr < vma->vm_end). */
61 if (addr < VA_EXCLUDE_START &&
62 (addr + len) >= VA_EXCLUDE_START) {
63 addr = VA_EXCLUDE_END;
64 vma = find_vma(mm, VA_EXCLUDE_END);
65 }
66 if (unlikely(task_size < addr)) {
67 if (start_addr != TASK_UNMAPPED_BASE) {
68 start_addr = addr = TASK_UNMAPPED_BASE;
69 mm->cached_hole_size = 0;
70 goto full_search;
71 }
72 return -ENOMEM;
73 }
74 if (likely(!vma || addr + len <= vma->vm_start)) {
75 /*
76 * Remember the place where we stopped the search:
77 */
78 mm->free_area_cache = addr + len;
79 return addr;
80 }
81 if (addr + mm->cached_hole_size < vma->vm_start)
82 mm->cached_hole_size = vma->vm_start - addr;
83
84 addr = ALIGN(vma->vm_end, HPAGE_SIZE);
85 }
86}
87
88static unsigned long
89hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
90 const unsigned long len,
91 const unsigned long pgoff,
92 const unsigned long flags)
93{
94 struct vm_area_struct *vma;
95 struct mm_struct *mm = current->mm;
96 unsigned long addr = addr0;
97
98 /* This should only ever run for 32-bit processes. */
99 BUG_ON(!test_thread_flag(TIF_32BIT));
100
101 /* check if free_area_cache is useful for us */
102 if (len <= mm->cached_hole_size) {
103 mm->cached_hole_size = 0;
104 mm->free_area_cache = mm->mmap_base;
105 }
106
107 /* either no address requested or can't fit in requested address hole */
108 addr = mm->free_area_cache & HPAGE_MASK;
109
110 /* make sure it can fit in the remaining address space */
111 if (likely(addr > len)) {
112 vma = find_vma(mm, addr-len);
113 if (!vma || addr <= vma->vm_start) {
114 /* remember the address as a hint for next time */
115 return (mm->free_area_cache = addr-len);
116 }
117 }
118
119 if (unlikely(mm->mmap_base < len))
120 goto bottomup;
121
122 addr = (mm->mmap_base-len) & HPAGE_MASK;
123
124 do {
125 /*
126 * Lookup failure means no vma is above this address,
127 * else if new region fits below vma->vm_start,
128 * return with success:
129 */
130 vma = find_vma(mm, addr);
131 if (likely(!vma || addr+len <= vma->vm_start)) {
132 /* remember the address as a hint for next time */
133 return (mm->free_area_cache = addr);
134 }
135
136 /* remember the largest hole we saw so far */
137 if (addr + mm->cached_hole_size < vma->vm_start)
138 mm->cached_hole_size = vma->vm_start - addr;
139
140 /* try just below the current vma->vm_start */
141 addr = (vma->vm_start-len) & HPAGE_MASK;
142 } while (likely(len < vma->vm_start));
143
144bottomup:
145 /*
146 * A failed mmap() very likely causes application failure,
147 * so fall back to the bottom-up function here. This scenario
148 * can happen with large stack limits and large mmap()
149 * allocations.
150 */
151 mm->cached_hole_size = ~0UL;
152 mm->free_area_cache = TASK_UNMAPPED_BASE;
153 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
154 /*
155 * Restore the topdown base:
156 */
157 mm->free_area_cache = mm->mmap_base;
158 mm->cached_hole_size = ~0UL;
159
160 return addr;
161}
162
163unsigned long
164hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
165 unsigned long len, unsigned long pgoff, unsigned long flags)
166{
167 struct mm_struct *mm = current->mm;
168 struct vm_area_struct *vma;
169 unsigned long task_size = TASK_SIZE;
170
171 if (test_thread_flag(TIF_32BIT))
172 task_size = STACK_TOP32;
173
174 if (len & ~HPAGE_MASK)
175 return -EINVAL;
176 if (len > task_size)
177 return -ENOMEM;
178
179 if (addr) {
180 addr = ALIGN(addr, HPAGE_SIZE);
181 vma = find_vma(mm, addr);
182 if (task_size - len >= addr &&
183 (!vma || addr + len <= vma->vm_start))
184 return addr;
185 }
186 if (mm->get_unmapped_area == arch_get_unmapped_area)
187 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
188 pgoff, flags);
189 else
190 return hugetlb_get_unmapped_area_topdown(file, addr, len,
191 pgoff, flags);
192}
193
25pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) 194pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
26{ 195{
27 pgd_t *pgd; 196 pgd_t *pgd;
@@ -48,12 +217,14 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
48 pmd_t *pmd; 217 pmd_t *pmd;
49 pte_t *pte = NULL; 218 pte_t *pte = NULL;
50 219
220 addr &= HPAGE_MASK;
221
51 pgd = pgd_offset(mm, addr); 222 pgd = pgd_offset(mm, addr);
52 if (pgd) { 223 if (!pgd_none(*pgd)) {
53 pud = pud_offset(pgd, addr); 224 pud = pud_offset(pgd, addr);
54 if (pud) { 225 if (!pud_none(*pud)) {
55 pmd = pmd_offset(pud, addr); 226 pmd = pmd_offset(pud, addr);
56 if (pmd) 227 if (!pmd_none(*pmd))
57 pte = pte_offset_map(pmd, addr); 228 pte = pte_offset_map(pmd, addr);
58 } 229 }
59 } 230 }
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 1e44ee26cee8..c2b556106fc1 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -6,6 +6,7 @@
6 */ 6 */
7 7
8#include <linux/config.h> 8#include <linux/config.h>
9#include <linux/module.h>
9#include <linux/kernel.h> 10#include <linux/kernel.h>
10#include <linux/sched.h> 11#include <linux/sched.h>
11#include <linux/string.h> 12#include <linux/string.h>
@@ -39,9 +40,27 @@
39#include <asm/tlb.h> 40#include <asm/tlb.h>
40#include <asm/spitfire.h> 41#include <asm/spitfire.h>
41#include <asm/sections.h> 42#include <asm/sections.h>
43#include <asm/tsb.h>
44#include <asm/hypervisor.h>
42 45
43extern void device_scan(void); 46extern void device_scan(void);
44 47
48#define MAX_PHYS_ADDRESS (1UL << 42UL)
49#define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL)
50#define KPTE_BITMAP_BYTES \
51 ((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 8)
52
53unsigned long kern_linear_pte_xor[2] __read_mostly;
54
55/* A bitmap, one bit for every 256MB of physical memory. If the bit
56 * is clear, we should use a 4MB page (via kern_linear_pte_xor[0]) else
57 * if set we should use a 256MB page (via kern_linear_pte_xor[1]).
58 */
59unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
60
61/* A special kernel TSB for 4MB and 256MB linear mappings. */
62struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
63
45#define MAX_BANKS 32 64#define MAX_BANKS 32
46 65
47static struct linux_prom64_registers pavail[MAX_BANKS] __initdata; 66static struct linux_prom64_registers pavail[MAX_BANKS] __initdata;
@@ -111,11 +130,9 @@ static void __init read_obp_memory(const char *property,
111 130
112unsigned long *sparc64_valid_addr_bitmap __read_mostly; 131unsigned long *sparc64_valid_addr_bitmap __read_mostly;
113 132
114/* Ugly, but necessary... -DaveM */ 133/* Kernel physical address base and size in bytes. */
115unsigned long phys_base __read_mostly;
116unsigned long kern_base __read_mostly; 134unsigned long kern_base __read_mostly;
117unsigned long kern_size __read_mostly; 135unsigned long kern_size __read_mostly;
118unsigned long pfn_base __read_mostly;
119 136
120/* get_new_mmu_context() uses "cache + 1". */ 137/* get_new_mmu_context() uses "cache + 1". */
121DEFINE_SPINLOCK(ctx_alloc_lock); 138DEFINE_SPINLOCK(ctx_alloc_lock);
@@ -141,24 +158,28 @@ unsigned long sparc64_kern_sec_context __read_mostly;
141 158
142int bigkernel = 0; 159int bigkernel = 0;
143 160
144/* XXX Tune this... */ 161kmem_cache_t *pgtable_cache __read_mostly;
145#define PGT_CACHE_LOW 25 162
146#define PGT_CACHE_HIGH 50 163static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
164{
165 clear_page(addr);
166}
167
168extern void tsb_cache_init(void);
147 169
148void check_pgt_cache(void) 170void pgtable_cache_init(void)
149{ 171{
150 preempt_disable(); 172 pgtable_cache = kmem_cache_create("pgtable_cache",
151 if (pgtable_cache_size > PGT_CACHE_HIGH) { 173 PAGE_SIZE, PAGE_SIZE,
152 do { 174 SLAB_HWCACHE_ALIGN |
153 if (pgd_quicklist) 175 SLAB_MUST_HWCACHE_ALIGN,
154 free_pgd_slow(get_pgd_fast()); 176 zero_ctor,
155 if (pte_quicklist[0]) 177 NULL);
156 free_pte_slow(pte_alloc_one_fast(NULL, 0)); 178 if (!pgtable_cache) {
157 if (pte_quicklist[1]) 179 prom_printf("Could not create pgtable_cache\n");
158 free_pte_slow(pte_alloc_one_fast(NULL, 1 << (PAGE_SHIFT + 10))); 180 prom_halt();
159 } while (pgtable_cache_size > PGT_CACHE_LOW);
160 } 181 }
161 preempt_enable(); 182 tsb_cache_init();
162} 183}
163 184
164#ifdef CONFIG_DEBUG_DCFLUSH 185#ifdef CONFIG_DEBUG_DCFLUSH
@@ -168,8 +189,9 @@ atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
168#endif 189#endif
169#endif 190#endif
170 191
171__inline__ void flush_dcache_page_impl(struct page *page) 192inline void flush_dcache_page_impl(struct page *page)
172{ 193{
194 BUG_ON(tlb_type == hypervisor);
173#ifdef CONFIG_DEBUG_DCFLUSH 195#ifdef CONFIG_DEBUG_DCFLUSH
174 atomic_inc(&dcpage_flushes); 196 atomic_inc(&dcpage_flushes);
175#endif 197#endif
@@ -186,8 +208,8 @@ __inline__ void flush_dcache_page_impl(struct page *page)
186} 208}
187 209
188#define PG_dcache_dirty PG_arch_1 210#define PG_dcache_dirty PG_arch_1
189#define PG_dcache_cpu_shift 24 211#define PG_dcache_cpu_shift 24UL
190#define PG_dcache_cpu_mask (256 - 1) 212#define PG_dcache_cpu_mask (256UL - 1UL)
191 213
192#if NR_CPUS > 256 214#if NR_CPUS > 256
193#error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus 215#error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus
@@ -243,32 +265,61 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long c
243 : "g1", "g7"); 265 : "g1", "g7");
244} 266}
245 267
268static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
269{
270 unsigned long tsb_addr = (unsigned long) ent;
271
272 if (tlb_type == cheetah_plus || tlb_type == hypervisor)
273 tsb_addr = __pa(tsb_addr);
274
275 __tsb_insert(tsb_addr, tag, pte);
276}
277
278unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
279unsigned long _PAGE_SZBITS __read_mostly;
280
246void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) 281void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
247{ 282{
248 struct page *page; 283 struct mm_struct *mm;
249 unsigned long pfn; 284 struct tsb *tsb;
250 unsigned long pg_flags; 285 unsigned long tag, flags;
251 286
252 pfn = pte_pfn(pte); 287 if (tlb_type != hypervisor) {
253 if (pfn_valid(pfn) && 288 unsigned long pfn = pte_pfn(pte);
254 (page = pfn_to_page(pfn), page_mapping(page)) && 289 unsigned long pg_flags;
255 ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) { 290 struct page *page;
256 int cpu = ((pg_flags >> PG_dcache_cpu_shift) & 291
257 PG_dcache_cpu_mask); 292 if (pfn_valid(pfn) &&
258 int this_cpu = get_cpu(); 293 (page = pfn_to_page(pfn), page_mapping(page)) &&
259 294 ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
260 /* This is just to optimize away some function calls 295 int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
261 * in the SMP case. 296 PG_dcache_cpu_mask);
262 */ 297 int this_cpu = get_cpu();
263 if (cpu == this_cpu) 298
264 flush_dcache_page_impl(page); 299 /* This is just to optimize away some function calls
265 else 300 * in the SMP case.
266 smp_flush_dcache_page_impl(page, cpu); 301 */
302 if (cpu == this_cpu)
303 flush_dcache_page_impl(page);
304 else
305 smp_flush_dcache_page_impl(page, cpu);
267 306
268 clear_dcache_dirty_cpu(page, cpu); 307 clear_dcache_dirty_cpu(page, cpu);
269 308
270 put_cpu(); 309 put_cpu();
310 }
271 } 311 }
312
313 mm = vma->vm_mm;
314
315 spin_lock_irqsave(&mm->context.lock, flags);
316
317 tsb = &mm->context.tsb[(address >> PAGE_SHIFT) &
318 (mm->context.tsb_nentries - 1UL)];
319 tag = (address >> 22UL);
320 tsb_insert(tsb, tag, pte_val(pte));
321
322 spin_unlock_irqrestore(&mm->context.lock, flags);
272} 323}
273 324
274void flush_dcache_page(struct page *page) 325void flush_dcache_page(struct page *page)
@@ -276,6 +327,9 @@ void flush_dcache_page(struct page *page)
276 struct address_space *mapping; 327 struct address_space *mapping;
277 int this_cpu; 328 int this_cpu;
278 329
330 if (tlb_type == hypervisor)
331 return;
332
279 /* Do not bother with the expensive D-cache flush if it 333 /* Do not bother with the expensive D-cache flush if it
280 * is merely the zero page. The 'bigcore' testcase in GDB 334 * is merely the zero page. The 'bigcore' testcase in GDB
281 * causes this case to run millions of times. 335 * causes this case to run millions of times.
@@ -311,7 +365,7 @@ out:
311 365
312void __kprobes flush_icache_range(unsigned long start, unsigned long end) 366void __kprobes flush_icache_range(unsigned long start, unsigned long end)
313{ 367{
314 /* Cheetah has coherent I-cache. */ 368 /* Cheetah and Hypervisor platform cpus have coherent I-cache. */
315 if (tlb_type == spitfire) { 369 if (tlb_type == spitfire) {
316 unsigned long kaddr; 370 unsigned long kaddr;
317 371
@@ -320,16 +374,6 @@ void __kprobes flush_icache_range(unsigned long start, unsigned long end)
320 } 374 }
321} 375}
322 376
323unsigned long page_to_pfn(struct page *page)
324{
325 return (unsigned long) ((page - mem_map) + pfn_base);
326}
327
328struct page *pfn_to_page(unsigned long pfn)
329{
330 return (mem_map + (pfn - pfn_base));
331}
332
333void show_mem(void) 377void show_mem(void)
334{ 378{
335 printk("Mem-info:\n"); 379 printk("Mem-info:\n");
@@ -338,7 +382,6 @@ void show_mem(void)
338 nr_swap_pages << (PAGE_SHIFT-10)); 382 nr_swap_pages << (PAGE_SHIFT-10));
339 printk("%ld pages of RAM\n", num_physpages); 383 printk("%ld pages of RAM\n", num_physpages);
340 printk("%d free pages\n", nr_free_pages()); 384 printk("%d free pages\n", nr_free_pages());
341 printk("%d pages in page table cache\n",pgtable_cache_size);
342} 385}
343 386
344void mmu_info(struct seq_file *m) 387void mmu_info(struct seq_file *m)
@@ -349,6 +392,8 @@ void mmu_info(struct seq_file *m)
349 seq_printf(m, "MMU Type\t: Cheetah+\n"); 392 seq_printf(m, "MMU Type\t: Cheetah+\n");
350 else if (tlb_type == spitfire) 393 else if (tlb_type == spitfire)
351 seq_printf(m, "MMU Type\t: Spitfire\n"); 394 seq_printf(m, "MMU Type\t: Spitfire\n");
395 else if (tlb_type == hypervisor)
396 seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
352 else 397 else
353 seq_printf(m, "MMU Type\t: ???\n"); 398 seq_printf(m, "MMU Type\t: ???\n");
354 399
@@ -371,45 +416,13 @@ struct linux_prom_translation {
371/* Exported for kernel TLB miss handling in ktlb.S */ 416/* Exported for kernel TLB miss handling in ktlb.S */
372struct linux_prom_translation prom_trans[512] __read_mostly; 417struct linux_prom_translation prom_trans[512] __read_mostly;
373unsigned int prom_trans_ents __read_mostly; 418unsigned int prom_trans_ents __read_mostly;
374unsigned int swapper_pgd_zero __read_mostly;
375
376extern unsigned long prom_boot_page;
377extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle);
378extern int prom_get_mmu_ihandle(void);
379extern void register_prom_callbacks(void);
380 419
381/* Exported for SMP bootup purposes. */ 420/* Exported for SMP bootup purposes. */
382unsigned long kern_locked_tte_data; 421unsigned long kern_locked_tte_data;
383 422
384/*
385 * Translate PROM's mapping we capture at boot time into physical address.
386 * The second parameter is only set from prom_callback() invocations.
387 */
388unsigned long prom_virt_to_phys(unsigned long promva, int *error)
389{
390 int i;
391
392 for (i = 0; i < prom_trans_ents; i++) {
393 struct linux_prom_translation *p = &prom_trans[i];
394
395 if (promva >= p->virt &&
396 promva < (p->virt + p->size)) {
397 unsigned long base = p->data & _PAGE_PADDR;
398
399 if (error)
400 *error = 0;
401 return base + (promva & (8192 - 1));
402 }
403 }
404 if (error)
405 *error = 1;
406 return 0UL;
407}
408
409/* The obp translations are saved based on 8k pagesize, since obp can 423/* The obp translations are saved based on 8k pagesize, since obp can
410 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS -> 424 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
411 * HI_OBP_ADDRESS range are handled in ktlb.S and do not use the vpte 425 * HI_OBP_ADDRESS range are handled in ktlb.S.
412 * scheme (also, see rant in inherit_locked_prom_mappings()).
413 */ 426 */
414static inline int in_obp_range(unsigned long vaddr) 427static inline int in_obp_range(unsigned long vaddr)
415{ 428{
@@ -490,6 +503,36 @@ static void __init read_obp_translations(void)
490 } 503 }
491} 504}
492 505
506static void __init hypervisor_tlb_lock(unsigned long vaddr,
507 unsigned long pte,
508 unsigned long mmu)
509{
510 register unsigned long func asm("%o5");
511 register unsigned long arg0 asm("%o0");
512 register unsigned long arg1 asm("%o1");
513 register unsigned long arg2 asm("%o2");
514 register unsigned long arg3 asm("%o3");
515
516 func = HV_FAST_MMU_MAP_PERM_ADDR;
517 arg0 = vaddr;
518 arg1 = 0;
519 arg2 = pte;
520 arg3 = mmu;
521 __asm__ __volatile__("ta 0x80"
522 : "=&r" (func), "=&r" (arg0),
523 "=&r" (arg1), "=&r" (arg2),
524 "=&r" (arg3)
525 : "0" (func), "1" (arg0), "2" (arg1),
526 "3" (arg2), "4" (arg3));
527 if (arg0 != 0) {
528 prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: "
529 "errors with %lx\n", vaddr, 0, pte, mmu, arg0);
530 prom_halt();
531 }
532}
533
534static unsigned long kern_large_tte(unsigned long paddr);
535
493static void __init remap_kernel(void) 536static void __init remap_kernel(void)
494{ 537{
495 unsigned long phys_page, tte_vaddr, tte_data; 538 unsigned long phys_page, tte_vaddr, tte_data;
@@ -497,25 +540,34 @@ static void __init remap_kernel(void)
497 540
498 tte_vaddr = (unsigned long) KERNBASE; 541 tte_vaddr = (unsigned long) KERNBASE;
499 phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL; 542 phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
500 tte_data = (phys_page | (_PAGE_VALID | _PAGE_SZ4MB | 543 tte_data = kern_large_tte(phys_page);
501 _PAGE_CP | _PAGE_CV | _PAGE_P |
502 _PAGE_L | _PAGE_W));
503 544
504 kern_locked_tte_data = tte_data; 545 kern_locked_tte_data = tte_data;
505 546
506 /* Now lock us into the TLBs via OBP. */ 547 /* Now lock us into the TLBs via Hypervisor or OBP. */
507 prom_dtlb_load(tlb_ent, tte_data, tte_vaddr); 548 if (tlb_type == hypervisor) {
508 prom_itlb_load(tlb_ent, tte_data, tte_vaddr); 549 hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
509 if (bigkernel) { 550 hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
510 tlb_ent -= 1; 551 if (bigkernel) {
511 prom_dtlb_load(tlb_ent, 552 tte_vaddr += 0x400000;
512 tte_data + 0x400000, 553 tte_data += 0x400000;
513 tte_vaddr + 0x400000); 554 hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
514 prom_itlb_load(tlb_ent, 555 hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
515 tte_data + 0x400000, 556 }
516 tte_vaddr + 0x400000); 557 } else {
558 prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
559 prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
560 if (bigkernel) {
561 tlb_ent -= 1;
562 prom_dtlb_load(tlb_ent,
563 tte_data + 0x400000,
564 tte_vaddr + 0x400000);
565 prom_itlb_load(tlb_ent,
566 tte_data + 0x400000,
567 tte_vaddr + 0x400000);
568 }
569 sparc64_highest_unlocked_tlb_ent = tlb_ent - 1;
517 } 570 }
518 sparc64_highest_unlocked_tlb_ent = tlb_ent - 1;
519 if (tlb_type == cheetah_plus) { 571 if (tlb_type == cheetah_plus) {
520 sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 | 572 sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
521 CTX_CHEETAH_PLUS_NUC); 573 CTX_CHEETAH_PLUS_NUC);
@@ -533,372 +585,14 @@ static void __init inherit_prom_mappings(void)
533 prom_printf("Remapping the kernel... "); 585 prom_printf("Remapping the kernel... ");
534 remap_kernel(); 586 remap_kernel();
535 prom_printf("done.\n"); 587 prom_printf("done.\n");
536
537 prom_printf("Registering callbacks... ");
538 register_prom_callbacks();
539 prom_printf("done.\n");
540}
541
542/* The OBP specifications for sun4u mark 0xfffffffc00000000 and
543 * upwards as reserved for use by the firmware (I wonder if this
544 * will be the same on Cheetah...). We use this virtual address
545 * range for the VPTE table mappings of the nucleus so we need
546 * to zap them when we enter the PROM. -DaveM
547 */
548static void __flush_nucleus_vptes(void)
549{
550 unsigned long prom_reserved_base = 0xfffffffc00000000UL;
551 int i;
552
553 /* Only DTLB must be checked for VPTE entries. */
554 if (tlb_type == spitfire) {
555 for (i = 0; i < 63; i++) {
556 unsigned long tag;
557
558 /* Spitfire Errata #32 workaround */
559 /* NOTE: Always runs on spitfire, so no cheetah+
560 * page size encodings.
561 */
562 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
563 "flush %%g6"
564 : /* No outputs */
565 : "r" (0),
566 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
567
568 tag = spitfire_get_dtlb_tag(i);
569 if (((tag & ~(PAGE_MASK)) == 0) &&
570 ((tag & (PAGE_MASK)) >= prom_reserved_base)) {
571 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
572 "membar #Sync"
573 : /* no outputs */
574 : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
575 spitfire_put_dtlb_data(i, 0x0UL);
576 }
577 }
578 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
579 for (i = 0; i < 512; i++) {
580 unsigned long tag = cheetah_get_dtlb_tag(i, 2);
581
582 if ((tag & ~PAGE_MASK) == 0 &&
583 (tag & PAGE_MASK) >= prom_reserved_base) {
584 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
585 "membar #Sync"
586 : /* no outputs */
587 : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
588 cheetah_put_dtlb_data(i, 0x0UL, 2);
589 }
590
591 if (tlb_type != cheetah_plus)
592 continue;
593
594 tag = cheetah_get_dtlb_tag(i, 3);
595
596 if ((tag & ~PAGE_MASK) == 0 &&
597 (tag & PAGE_MASK) >= prom_reserved_base) {
598 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
599 "membar #Sync"
600 : /* no outputs */
601 : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
602 cheetah_put_dtlb_data(i, 0x0UL, 3);
603 }
604 }
605 } else {
606 /* Implement me :-) */
607 BUG();
608 }
609} 588}
610 589
611static int prom_ditlb_set;
612struct prom_tlb_entry {
613 int tlb_ent;
614 unsigned long tlb_tag;
615 unsigned long tlb_data;
616};
617struct prom_tlb_entry prom_itlb[16], prom_dtlb[16];
618
619void prom_world(int enter) 590void prom_world(int enter)
620{ 591{
621 unsigned long pstate;
622 int i;
623
624 if (!enter) 592 if (!enter)
625 set_fs((mm_segment_t) { get_thread_current_ds() }); 593 set_fs((mm_segment_t) { get_thread_current_ds() });
626 594
627 if (!prom_ditlb_set) 595 __asm__ __volatile__("flushw");
628 return;
629
630 /* Make sure the following runs atomically. */
631 __asm__ __volatile__("flushw\n\t"
632 "rdpr %%pstate, %0\n\t"
633 "wrpr %0, %1, %%pstate"
634 : "=r" (pstate)
635 : "i" (PSTATE_IE));
636
637 if (enter) {
638 /* Kick out nucleus VPTEs. */
639 __flush_nucleus_vptes();
640
641 /* Install PROM world. */
642 for (i = 0; i < 16; i++) {
643 if (prom_dtlb[i].tlb_ent != -1) {
644 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
645 "membar #Sync"
646 : : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
647 "i" (ASI_DMMU));
648 if (tlb_type == spitfire)
649 spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
650 prom_dtlb[i].tlb_data);
651 else if (tlb_type == cheetah || tlb_type == cheetah_plus)
652 cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent,
653 prom_dtlb[i].tlb_data);
654 }
655 if (prom_itlb[i].tlb_ent != -1) {
656 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
657 "membar #Sync"
658 : : "r" (prom_itlb[i].tlb_tag),
659 "r" (TLB_TAG_ACCESS),
660 "i" (ASI_IMMU));
661 if (tlb_type == spitfire)
662 spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
663 prom_itlb[i].tlb_data);
664 else if (tlb_type == cheetah || tlb_type == cheetah_plus)
665 cheetah_put_litlb_data(prom_itlb[i].tlb_ent,
666 prom_itlb[i].tlb_data);
667 }
668 }
669 } else {
670 for (i = 0; i < 16; i++) {
671 if (prom_dtlb[i].tlb_ent != -1) {
672 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
673 "membar #Sync"
674 : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
675 if (tlb_type == spitfire)
676 spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, 0x0UL);
677 else
678 cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent, 0x0UL);
679 }
680 if (prom_itlb[i].tlb_ent != -1) {
681 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
682 "membar #Sync"
683 : : "r" (TLB_TAG_ACCESS),
684 "i" (ASI_IMMU));
685 if (tlb_type == spitfire)
686 spitfire_put_itlb_data(prom_itlb[i].tlb_ent, 0x0UL);
687 else
688 cheetah_put_litlb_data(prom_itlb[i].tlb_ent, 0x0UL);
689 }
690 }
691 }
692 __asm__ __volatile__("wrpr %0, 0, %%pstate"
693 : : "r" (pstate));
694}
695
696void inherit_locked_prom_mappings(int save_p)
697{
698 int i;
699 int dtlb_seen = 0;
700 int itlb_seen = 0;
701
702 /* Fucking losing PROM has more mappings in the TLB, but
703 * it (conveniently) fails to mention any of these in the
704 * translations property. The only ones that matter are
705 * the locked PROM tlb entries, so we impose the following
706 * irrecovable rule on the PROM, it is allowed 8 locked
707 * entries in the ITLB and 8 in the DTLB.
708 *
709 * Supposedly the upper 16GB of the address space is
710 * reserved for OBP, BUT I WISH THIS WAS DOCUMENTED
711 * SOMEWHERE!!!!!!!!!!!!!!!!! Furthermore the entire interface
712 * used between the client program and the firmware on sun5
713 * systems to coordinate mmu mappings is also COMPLETELY
714 * UNDOCUMENTED!!!!!! Thanks S(t)un!
715 */
716 if (save_p) {
717 for (i = 0; i < 16; i++) {
718 prom_itlb[i].tlb_ent = -1;
719 prom_dtlb[i].tlb_ent = -1;
720 }
721 }
722 if (tlb_type == spitfire) {
723 int high = sparc64_highest_unlocked_tlb_ent;
724 for (i = 0; i <= high; i++) {
725 unsigned long data;
726
727 /* Spitfire Errata #32 workaround */
728 /* NOTE: Always runs on spitfire, so no cheetah+
729 * page size encodings.
730 */
731 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
732 "flush %%g6"
733 : /* No outputs */
734 : "r" (0),
735 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
736
737 data = spitfire_get_dtlb_data(i);
738 if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
739 unsigned long tag;
740
741 /* Spitfire Errata #32 workaround */
742 /* NOTE: Always runs on spitfire, so no
743 * cheetah+ page size encodings.
744 */
745 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
746 "flush %%g6"
747 : /* No outputs */
748 : "r" (0),
749 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
750
751 tag = spitfire_get_dtlb_tag(i);
752 if (save_p) {
753 prom_dtlb[dtlb_seen].tlb_ent = i;
754 prom_dtlb[dtlb_seen].tlb_tag = tag;
755 prom_dtlb[dtlb_seen].tlb_data = data;
756 }
757 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
758 "membar #Sync"
759 : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
760 spitfire_put_dtlb_data(i, 0x0UL);
761
762 dtlb_seen++;
763 if (dtlb_seen > 15)
764 break;
765 }
766 }
767
768 for (i = 0; i < high; i++) {
769 unsigned long data;
770
771 /* Spitfire Errata #32 workaround */
772 /* NOTE: Always runs on spitfire, so no
773 * cheetah+ page size encodings.
774 */
775 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
776 "flush %%g6"
777 : /* No outputs */
778 : "r" (0),
779 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
780
781 data = spitfire_get_itlb_data(i);
782 if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
783 unsigned long tag;
784
785 /* Spitfire Errata #32 workaround */
786 /* NOTE: Always runs on spitfire, so no
787 * cheetah+ page size encodings.
788 */
789 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
790 "flush %%g6"
791 : /* No outputs */
792 : "r" (0),
793 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
794
795 tag = spitfire_get_itlb_tag(i);
796 if (save_p) {
797 prom_itlb[itlb_seen].tlb_ent = i;
798 prom_itlb[itlb_seen].tlb_tag = tag;
799 prom_itlb[itlb_seen].tlb_data = data;
800 }
801 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
802 "membar #Sync"
803 : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
804 spitfire_put_itlb_data(i, 0x0UL);
805
806 itlb_seen++;
807 if (itlb_seen > 15)
808 break;
809 }
810 }
811 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
812 int high = sparc64_highest_unlocked_tlb_ent;
813
814 for (i = 0; i <= high; i++) {
815 unsigned long data;
816
817 data = cheetah_get_ldtlb_data(i);
818 if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
819 unsigned long tag;
820
821 tag = cheetah_get_ldtlb_tag(i);
822 if (save_p) {
823 prom_dtlb[dtlb_seen].tlb_ent = i;
824 prom_dtlb[dtlb_seen].tlb_tag = tag;
825 prom_dtlb[dtlb_seen].tlb_data = data;
826 }
827 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
828 "membar #Sync"
829 : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
830 cheetah_put_ldtlb_data(i, 0x0UL);
831
832 dtlb_seen++;
833 if (dtlb_seen > 15)
834 break;
835 }
836 }
837
838 for (i = 0; i < high; i++) {
839 unsigned long data;
840
841 data = cheetah_get_litlb_data(i);
842 if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
843 unsigned long tag;
844
845 tag = cheetah_get_litlb_tag(i);
846 if (save_p) {
847 prom_itlb[itlb_seen].tlb_ent = i;
848 prom_itlb[itlb_seen].tlb_tag = tag;
849 prom_itlb[itlb_seen].tlb_data = data;
850 }
851 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
852 "membar #Sync"
853 : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
854 cheetah_put_litlb_data(i, 0x0UL);
855
856 itlb_seen++;
857 if (itlb_seen > 15)
858 break;
859 }
860 }
861 } else {
862 /* Implement me :-) */
863 BUG();
864 }
865 if (save_p)
866 prom_ditlb_set = 1;
867}
868
869/* Give PROM back his world, done during reboots... */
870void prom_reload_locked(void)
871{
872 int i;
873
874 for (i = 0; i < 16; i++) {
875 if (prom_dtlb[i].tlb_ent != -1) {
876 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
877 "membar #Sync"
878 : : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
879 "i" (ASI_DMMU));
880 if (tlb_type == spitfire)
881 spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
882 prom_dtlb[i].tlb_data);
883 else if (tlb_type == cheetah || tlb_type == cheetah_plus)
884 cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent,
885 prom_dtlb[i].tlb_data);
886 }
887
888 if (prom_itlb[i].tlb_ent != -1) {
889 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
890 "membar #Sync"
891 : : "r" (prom_itlb[i].tlb_tag),
892 "r" (TLB_TAG_ACCESS),
893 "i" (ASI_IMMU));
894 if (tlb_type == spitfire)
895 spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
896 prom_itlb[i].tlb_data);
897 else
898 cheetah_put_litlb_data(prom_itlb[i].tlb_ent,
899 prom_itlb[i].tlb_data);
900 }
901 }
902} 596}
903 597
904#ifdef DCACHE_ALIASING_POSSIBLE 598#ifdef DCACHE_ALIASING_POSSIBLE
@@ -914,7 +608,7 @@ void __flush_dcache_range(unsigned long start, unsigned long end)
914 if (++n >= 512) 608 if (++n >= 512)
915 break; 609 break;
916 } 610 }
917 } else { 611 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
918 start = __pa(start); 612 start = __pa(start);
919 end = __pa(end); 613 end = __pa(end);
920 for (va = start; va < end; va += 32) 614 for (va = start; va < end; va += 32)
@@ -927,63 +621,6 @@ void __flush_dcache_range(unsigned long start, unsigned long end)
927} 621}
928#endif /* DCACHE_ALIASING_POSSIBLE */ 622#endif /* DCACHE_ALIASING_POSSIBLE */
929 623
930/* If not locked, zap it. */
931void __flush_tlb_all(void)
932{
933 unsigned long pstate;
934 int i;
935
936 __asm__ __volatile__("flushw\n\t"
937 "rdpr %%pstate, %0\n\t"
938 "wrpr %0, %1, %%pstate"
939 : "=r" (pstate)
940 : "i" (PSTATE_IE));
941 if (tlb_type == spitfire) {
942 for (i = 0; i < 64; i++) {
943 /* Spitfire Errata #32 workaround */
944 /* NOTE: Always runs on spitfire, so no
945 * cheetah+ page size encodings.
946 */
947 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
948 "flush %%g6"
949 : /* No outputs */
950 : "r" (0),
951 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
952
953 if (!(spitfire_get_dtlb_data(i) & _PAGE_L)) {
954 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
955 "membar #Sync"
956 : /* no outputs */
957 : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
958 spitfire_put_dtlb_data(i, 0x0UL);
959 }
960
961 /* Spitfire Errata #32 workaround */
962 /* NOTE: Always runs on spitfire, so no
963 * cheetah+ page size encodings.
964 */
965 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
966 "flush %%g6"
967 : /* No outputs */
968 : "r" (0),
969 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
970
971 if (!(spitfire_get_itlb_data(i) & _PAGE_L)) {
972 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
973 "membar #Sync"
974 : /* no outputs */
975 : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
976 spitfire_put_itlb_data(i, 0x0UL);
977 }
978 }
979 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
980 cheetah_flush_dtlb_all();
981 cheetah_flush_itlb_all();
982 }
983 __asm__ __volatile__("wrpr %0, 0, %%pstate"
984 : : "r" (pstate));
985}
986
987/* Caller does TLB context flushing on local CPU if necessary. 624/* Caller does TLB context flushing on local CPU if necessary.
988 * The caller also ensures that CTX_VALID(mm->context) is false. 625 * The caller also ensures that CTX_VALID(mm->context) is false.
989 * 626 *
@@ -991,17 +628,21 @@ void __flush_tlb_all(void)
991 * let the user have CTX 0 (nucleus) or we ever use a CTX 628 * let the user have CTX 0 (nucleus) or we ever use a CTX
992 * version of zero (and thus NO_CONTEXT would not be caught 629 * version of zero (and thus NO_CONTEXT would not be caught
993 * by version mis-match tests in mmu_context.h). 630 * by version mis-match tests in mmu_context.h).
631 *
632 * Always invoked with interrupts disabled.
994 */ 633 */
995void get_new_mmu_context(struct mm_struct *mm) 634void get_new_mmu_context(struct mm_struct *mm)
996{ 635{
997 unsigned long ctx, new_ctx; 636 unsigned long ctx, new_ctx;
998 unsigned long orig_pgsz_bits; 637 unsigned long orig_pgsz_bits;
999 638 unsigned long flags;
639 int new_version;
1000 640
1001 spin_lock(&ctx_alloc_lock); 641 spin_lock_irqsave(&ctx_alloc_lock, flags);
1002 orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK); 642 orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
1003 ctx = (tlb_context_cache + 1) & CTX_NR_MASK; 643 ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
1004 new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx); 644 new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
645 new_version = 0;
1005 if (new_ctx >= (1 << CTX_NR_BITS)) { 646 if (new_ctx >= (1 << CTX_NR_BITS)) {
1006 new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1); 647 new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
1007 if (new_ctx >= ctx) { 648 if (new_ctx >= ctx) {
@@ -1024,6 +665,7 @@ void get_new_mmu_context(struct mm_struct *mm)
1024 mmu_context_bmap[i + 2] = 0; 665 mmu_context_bmap[i + 2] = 0;
1025 mmu_context_bmap[i + 3] = 0; 666 mmu_context_bmap[i + 3] = 0;
1026 } 667 }
668 new_version = 1;
1027 goto out; 669 goto out;
1028 } 670 }
1029 } 671 }
@@ -1032,79 +674,10 @@ void get_new_mmu_context(struct mm_struct *mm)
1032out: 674out:
1033 tlb_context_cache = new_ctx; 675 tlb_context_cache = new_ctx;
1034 mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits; 676 mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
1035 spin_unlock(&ctx_alloc_lock); 677 spin_unlock_irqrestore(&ctx_alloc_lock, flags);
1036}
1037
1038#ifndef CONFIG_SMP
1039struct pgtable_cache_struct pgt_quicklists;
1040#endif
1041
1042/* OK, we have to color these pages. The page tables are accessed
1043 * by non-Dcache enabled mapping in the VPTE area by the dtlb_backend.S
1044 * code, as well as by PAGE_OFFSET range direct-mapped addresses by
1045 * other parts of the kernel. By coloring, we make sure that the tlbmiss
1046 * fast handlers do not get data from old/garbage dcache lines that
1047 * correspond to an old/stale virtual address (user/kernel) that
1048 * previously mapped the pagetable page while accessing vpte range
1049 * addresses. The idea is that if the vpte color and PAGE_OFFSET range
1050 * color is the same, then when the kernel initializes the pagetable
1051 * using the later address range, accesses with the first address
1052 * range will see the newly initialized data rather than the garbage.
1053 */
1054#ifdef DCACHE_ALIASING_POSSIBLE
1055#define DC_ALIAS_SHIFT 1
1056#else
1057#define DC_ALIAS_SHIFT 0
1058#endif
1059pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
1060{
1061 struct page *page;
1062 unsigned long color;
1063
1064 {
1065 pte_t *ptep = pte_alloc_one_fast(mm, address);
1066
1067 if (ptep)
1068 return ptep;
1069 }
1070 678
1071 color = VPTE_COLOR(address); 679 if (unlikely(new_version))
1072 page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, DC_ALIAS_SHIFT); 680 smp_new_mmu_context_version();
1073 if (page) {
1074 unsigned long *to_free;
1075 unsigned long paddr;
1076 pte_t *pte;
1077
1078#ifdef DCACHE_ALIASING_POSSIBLE
1079 set_page_count(page, 1);
1080 ClearPageCompound(page);
1081
1082 set_page_count((page + 1), 1);
1083 ClearPageCompound(page + 1);
1084#endif
1085 paddr = (unsigned long) page_address(page);
1086 memset((char *)paddr, 0, (PAGE_SIZE << DC_ALIAS_SHIFT));
1087
1088 if (!color) {
1089 pte = (pte_t *) paddr;
1090 to_free = (unsigned long *) (paddr + PAGE_SIZE);
1091 } else {
1092 pte = (pte_t *) (paddr + PAGE_SIZE);
1093 to_free = (unsigned long *) paddr;
1094 }
1095
1096#ifdef DCACHE_ALIASING_POSSIBLE
1097 /* Now free the other one up, adjust cache size. */
1098 preempt_disable();
1099 *to_free = (unsigned long) pte_quicklist[color ^ 0x1];
1100 pte_quicklist[color ^ 0x1] = to_free;
1101 pgtable_cache_size++;
1102 preempt_enable();
1103#endif
1104
1105 return pte;
1106 }
1107 return NULL;
1108} 681}
1109 682
1110void sparc_ultra_dump_itlb(void) 683void sparc_ultra_dump_itlb(void)
@@ -1196,9 +769,78 @@ void sparc_ultra_dump_dtlb(void)
1196 769
1197extern unsigned long cmdline_memory_size; 770extern unsigned long cmdline_memory_size;
1198 771
1199unsigned long __init bootmem_init(unsigned long *pages_avail) 772/* Find a free area for the bootmem map, avoiding the kernel image
773 * and the initial ramdisk.
774 */
775static unsigned long __init choose_bootmap_pfn(unsigned long start_pfn,
776 unsigned long end_pfn)
777{
778 unsigned long avoid_start, avoid_end, bootmap_size;
779 int i;
780
781 bootmap_size = ((end_pfn - start_pfn) + 7) / 8;
782 bootmap_size = ALIGN(bootmap_size, sizeof(long));
783
784 avoid_start = avoid_end = 0;
785#ifdef CONFIG_BLK_DEV_INITRD
786 avoid_start = initrd_start;
787 avoid_end = PAGE_ALIGN(initrd_end);
788#endif
789
790#ifdef CONFIG_DEBUG_BOOTMEM
791 prom_printf("choose_bootmap_pfn: kern[%lx:%lx] avoid[%lx:%lx]\n",
792 kern_base, PAGE_ALIGN(kern_base + kern_size),
793 avoid_start, avoid_end);
794#endif
795 for (i = 0; i < pavail_ents; i++) {
796 unsigned long start, end;
797
798 start = pavail[i].phys_addr;
799 end = start + pavail[i].reg_size;
800
801 while (start < end) {
802 if (start >= kern_base &&
803 start < PAGE_ALIGN(kern_base + kern_size)) {
804 start = PAGE_ALIGN(kern_base + kern_size);
805 continue;
806 }
807 if (start >= avoid_start && start < avoid_end) {
808 start = avoid_end;
809 continue;
810 }
811
812 if ((end - start) < bootmap_size)
813 break;
814
815 if (start < kern_base &&
816 (start + bootmap_size) > kern_base) {
817 start = PAGE_ALIGN(kern_base + kern_size);
818 continue;
819 }
820
821 if (start < avoid_start &&
822 (start + bootmap_size) > avoid_start) {
823 start = avoid_end;
824 continue;
825 }
826
827 /* OK, it doesn't overlap anything, use it. */
828#ifdef CONFIG_DEBUG_BOOTMEM
829 prom_printf("choose_bootmap_pfn: Using %lx [%lx]\n",
830 start >> PAGE_SHIFT, start);
831#endif
832 return start >> PAGE_SHIFT;
833 }
834 }
835
836 prom_printf("Cannot find free area for bootmap, aborting.\n");
837 prom_halt();
838}
839
840static unsigned long __init bootmem_init(unsigned long *pages_avail,
841 unsigned long phys_base)
1200{ 842{
1201 unsigned long bootmap_size, start_pfn, end_pfn; 843 unsigned long bootmap_size, end_pfn;
1202 unsigned long end_of_phys_memory = 0UL; 844 unsigned long end_of_phys_memory = 0UL;
1203 unsigned long bootmap_pfn, bytes_avail, size; 845 unsigned long bootmap_pfn, bytes_avail, size;
1204 int i; 846 int i;
@@ -1236,14 +878,6 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
1236 878
1237 *pages_avail = bytes_avail >> PAGE_SHIFT; 879 *pages_avail = bytes_avail >> PAGE_SHIFT;
1238 880
1239 /* Start with page aligned address of last symbol in kernel
1240 * image. The kernel is hard mapped below PAGE_OFFSET in a
1241 * 4MB locked TLB translation.
1242 */
1243 start_pfn = PAGE_ALIGN(kern_base + kern_size) >> PAGE_SHIFT;
1244
1245 bootmap_pfn = start_pfn;
1246
1247 end_pfn = end_of_phys_memory >> PAGE_SHIFT; 881 end_pfn = end_of_phys_memory >> PAGE_SHIFT;
1248 882
1249#ifdef CONFIG_BLK_DEV_INITRD 883#ifdef CONFIG_BLK_DEV_INITRD
@@ -1260,23 +894,22 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
1260 "(0x%016lx > 0x%016lx)\ndisabling initrd\n", 894 "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
1261 initrd_end, end_of_phys_memory); 895 initrd_end, end_of_phys_memory);
1262 initrd_start = 0; 896 initrd_start = 0;
1263 } 897 initrd_end = 0;
1264 if (initrd_start) {
1265 if (initrd_start >= (start_pfn << PAGE_SHIFT) &&
1266 initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE)
1267 bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT;
1268 } 898 }
1269 } 899 }
1270#endif 900#endif
1271 /* Initialize the boot-time allocator. */ 901 /* Initialize the boot-time allocator. */
1272 max_pfn = max_low_pfn = end_pfn; 902 max_pfn = max_low_pfn = end_pfn;
1273 min_low_pfn = pfn_base; 903 min_low_pfn = (phys_base >> PAGE_SHIFT);
904
905 bootmap_pfn = choose_bootmap_pfn(min_low_pfn, end_pfn);
1274 906
1275#ifdef CONFIG_DEBUG_BOOTMEM 907#ifdef CONFIG_DEBUG_BOOTMEM
1276 prom_printf("init_bootmem(min[%lx], bootmap[%lx], max[%lx])\n", 908 prom_printf("init_bootmem(min[%lx], bootmap[%lx], max[%lx])\n",
1277 min_low_pfn, bootmap_pfn, max_low_pfn); 909 min_low_pfn, bootmap_pfn, max_low_pfn);
1278#endif 910#endif
1279 bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn); 911 bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn,
912 min_low_pfn, end_pfn);
1280 913
1281 /* Now register the available physical memory with the 914 /* Now register the available physical memory with the
1282 * allocator. 915 * allocator.
@@ -1324,9 +957,26 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
1324 reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size); 957 reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);
1325 *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT; 958 *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
1326 959
960 for (i = 0; i < pavail_ents; i++) {
961 unsigned long start_pfn, end_pfn;
962
963 start_pfn = pavail[i].phys_addr >> PAGE_SHIFT;
964 end_pfn = (start_pfn + (pavail[i].reg_size >> PAGE_SHIFT));
965#ifdef CONFIG_DEBUG_BOOTMEM
966 prom_printf("memory_present(0, %lx, %lx)\n",
967 start_pfn, end_pfn);
968#endif
969 memory_present(0, start_pfn, end_pfn);
970 }
971
972 sparse_init();
973
1327 return end_pfn; 974 return end_pfn;
1328} 975}
1329 976
977static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
978static int pall_ents __initdata;
979
1330#ifdef CONFIG_DEBUG_PAGEALLOC 980#ifdef CONFIG_DEBUG_PAGEALLOC
1331static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot) 981static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot)
1332{ 982{
@@ -1382,14 +1032,44 @@ static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend,
1382 return alloc_bytes; 1032 return alloc_bytes;
1383} 1033}
1384 1034
1385static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
1386static int pall_ents __initdata;
1387
1388extern unsigned int kvmap_linear_patch[1]; 1035extern unsigned int kvmap_linear_patch[1];
1036#endif /* CONFIG_DEBUG_PAGEALLOC */
1037
1038static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
1039{
1040 const unsigned long shift_256MB = 28;
1041 const unsigned long mask_256MB = ((1UL << shift_256MB) - 1UL);
1042 const unsigned long size_256MB = (1UL << shift_256MB);
1043
1044 while (start < end) {
1045 long remains;
1046
1047 remains = end - start;
1048 if (remains < size_256MB)
1049 break;
1050
1051 if (start & mask_256MB) {
1052 start = (start + size_256MB) & ~mask_256MB;
1053 continue;
1054 }
1055
1056 while (remains >= size_256MB) {
1057 unsigned long index = start >> shift_256MB;
1058
1059 __set_bit(index, kpte_linear_bitmap);
1060
1061 start += size_256MB;
1062 remains -= size_256MB;
1063 }
1064 }
1065}
1389 1066
1390static void __init kernel_physical_mapping_init(void) 1067static void __init kernel_physical_mapping_init(void)
1391{ 1068{
1392 unsigned long i, mem_alloced = 0UL; 1069 unsigned long i;
1070#ifdef CONFIG_DEBUG_PAGEALLOC
1071 unsigned long mem_alloced = 0UL;
1072#endif
1393 1073
1394 read_obp_memory("reg", &pall[0], &pall_ents); 1074 read_obp_memory("reg", &pall[0], &pall_ents);
1395 1075
@@ -1398,10 +1078,16 @@ static void __init kernel_physical_mapping_init(void)
1398 1078
1399 phys_start = pall[i].phys_addr; 1079 phys_start = pall[i].phys_addr;
1400 phys_end = phys_start + pall[i].reg_size; 1080 phys_end = phys_start + pall[i].reg_size;
1081
1082 mark_kpte_bitmap(phys_start, phys_end);
1083
1084#ifdef CONFIG_DEBUG_PAGEALLOC
1401 mem_alloced += kernel_map_range(phys_start, phys_end, 1085 mem_alloced += kernel_map_range(phys_start, phys_end,
1402 PAGE_KERNEL); 1086 PAGE_KERNEL);
1087#endif
1403 } 1088 }
1404 1089
1090#ifdef CONFIG_DEBUG_PAGEALLOC
1405 printk("Allocated %ld bytes for kernel page tables.\n", 1091 printk("Allocated %ld bytes for kernel page tables.\n",
1406 mem_alloced); 1092 mem_alloced);
1407 1093
@@ -1409,8 +1095,10 @@ static void __init kernel_physical_mapping_init(void)
1409 flushi(&kvmap_linear_patch[0]); 1095 flushi(&kvmap_linear_patch[0]);
1410 1096
1411 __flush_tlb_all(); 1097 __flush_tlb_all();
1098#endif
1412} 1099}
1413 1100
1101#ifdef CONFIG_DEBUG_PAGEALLOC
1414void kernel_map_pages(struct page *page, int numpages, int enable) 1102void kernel_map_pages(struct page *page, int numpages, int enable)
1415{ 1103{
1416 unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT; 1104 unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
@@ -1419,6 +1107,9 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
1419 kernel_map_range(phys_start, phys_end, 1107 kernel_map_range(phys_start, phys_end,
1420 (enable ? PAGE_KERNEL : __pgprot(0))); 1108 (enable ? PAGE_KERNEL : __pgprot(0)));
1421 1109
1110 flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
1111 PAGE_OFFSET + phys_end);
1112
1422 /* we should perform an IPI and flush all tlbs, 1113 /* we should perform an IPI and flush all tlbs,
1423 * but that can deadlock->flush only current cpu. 1114 * but that can deadlock->flush only current cpu.
1424 */ 1115 */
@@ -1439,18 +1130,150 @@ unsigned long __init find_ecache_flush_span(unsigned long size)
1439 return ~0UL; 1130 return ~0UL;
1440} 1131}
1441 1132
1133static void __init tsb_phys_patch(void)
1134{
1135 struct tsb_ldquad_phys_patch_entry *pquad;
1136 struct tsb_phys_patch_entry *p;
1137
1138 pquad = &__tsb_ldquad_phys_patch;
1139 while (pquad < &__tsb_ldquad_phys_patch_end) {
1140 unsigned long addr = pquad->addr;
1141
1142 if (tlb_type == hypervisor)
1143 *(unsigned int *) addr = pquad->sun4v_insn;
1144 else
1145 *(unsigned int *) addr = pquad->sun4u_insn;
1146 wmb();
1147 __asm__ __volatile__("flush %0"
1148 : /* no outputs */
1149 : "r" (addr));
1150
1151 pquad++;
1152 }
1153
1154 p = &__tsb_phys_patch;
1155 while (p < &__tsb_phys_patch_end) {
1156 unsigned long addr = p->addr;
1157
1158 *(unsigned int *) addr = p->insn;
1159 wmb();
1160 __asm__ __volatile__("flush %0"
1161 : /* no outputs */
1162 : "r" (addr));
1163
1164 p++;
1165 }
1166}
1167
1168/* Don't mark as init, we give this to the Hypervisor. */
1169static struct hv_tsb_descr ktsb_descr[2];
1170extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
1171
1172static void __init sun4v_ktsb_init(void)
1173{
1174 unsigned long ktsb_pa;
1175
1176 /* First KTSB for PAGE_SIZE mappings. */
1177 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
1178
1179 switch (PAGE_SIZE) {
1180 case 8 * 1024:
1181 default:
1182 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
1183 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
1184 break;
1185
1186 case 64 * 1024:
1187 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
1188 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
1189 break;
1190
1191 case 512 * 1024:
1192 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
1193 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
1194 break;
1195
1196 case 4 * 1024 * 1024:
1197 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
1198 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
1199 break;
1200 };
1201
1202 ktsb_descr[0].assoc = 1;
1203 ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
1204 ktsb_descr[0].ctx_idx = 0;
1205 ktsb_descr[0].tsb_base = ktsb_pa;
1206 ktsb_descr[0].resv = 0;
1207
1208 /* Second KTSB for 4MB/256MB mappings. */
1209 ktsb_pa = (kern_base +
1210 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
1211
1212 ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
1213 ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB |
1214 HV_PGSZ_MASK_256MB);
1215 ktsb_descr[1].assoc = 1;
1216 ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
1217 ktsb_descr[1].ctx_idx = 0;
1218 ktsb_descr[1].tsb_base = ktsb_pa;
1219 ktsb_descr[1].resv = 0;
1220}
1221
1222void __cpuinit sun4v_ktsb_register(void)
1223{
1224 register unsigned long func asm("%o5");
1225 register unsigned long arg0 asm("%o0");
1226 register unsigned long arg1 asm("%o1");
1227 unsigned long pa;
1228
1229 pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);
1230
1231 func = HV_FAST_MMU_TSB_CTX0;
1232 arg0 = 2;
1233 arg1 = pa;
1234 __asm__ __volatile__("ta %6"
1235 : "=&r" (func), "=&r" (arg0), "=&r" (arg1)
1236 : "0" (func), "1" (arg0), "2" (arg1),
1237 "i" (HV_FAST_TRAP));
1238}
1239
1442/* paging_init() sets up the page tables */ 1240/* paging_init() sets up the page tables */
1443 1241
1444extern void cheetah_ecache_flush_init(void); 1242extern void cheetah_ecache_flush_init(void);
1243extern void sun4v_patch_tlb_handlers(void);
1445 1244
1446static unsigned long last_valid_pfn; 1245static unsigned long last_valid_pfn;
1447pgd_t swapper_pg_dir[2048]; 1246pgd_t swapper_pg_dir[2048];
1448 1247
1248static void sun4u_pgprot_init(void);
1249static void sun4v_pgprot_init(void);
1250
1449void __init paging_init(void) 1251void __init paging_init(void)
1450{ 1252{
1451 unsigned long end_pfn, pages_avail, shift; 1253 unsigned long end_pfn, pages_avail, shift, phys_base;
1452 unsigned long real_end, i; 1254 unsigned long real_end, i;
1453 1255
1256 kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
1257 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
1258
1259 /* Invalidate both kernel TSBs. */
1260 memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
1261 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
1262
1263 if (tlb_type == hypervisor)
1264 sun4v_pgprot_init();
1265 else
1266 sun4u_pgprot_init();
1267
1268 if (tlb_type == cheetah_plus ||
1269 tlb_type == hypervisor)
1270 tsb_phys_patch();
1271
1272 if (tlb_type == hypervisor) {
1273 sun4v_patch_tlb_handlers();
1274 sun4v_ktsb_init();
1275 }
1276
1454 /* Find available physical memory... */ 1277 /* Find available physical memory... */
1455 read_obp_memory("available", &pavail[0], &pavail_ents); 1278 read_obp_memory("available", &pavail[0], &pavail_ents);
1456 1279
@@ -1458,11 +1281,6 @@ void __init paging_init(void)
1458 for (i = 0; i < pavail_ents; i++) 1281 for (i = 0; i < pavail_ents; i++)
1459 phys_base = min(phys_base, pavail[i].phys_addr); 1282 phys_base = min(phys_base, pavail[i].phys_addr);
1460 1283
1461 pfn_base = phys_base >> PAGE_SHIFT;
1462
1463 kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
1464 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
1465
1466 set_bit(0, mmu_context_bmap); 1284 set_bit(0, mmu_context_bmap);
1467 1285
1468 shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); 1286 shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
@@ -1486,47 +1304,38 @@ void __init paging_init(void)
1486 pud_set(pud_offset(&swapper_pg_dir[0], 0), 1304 pud_set(pud_offset(&swapper_pg_dir[0], 0),
1487 swapper_low_pmd_dir + (shift / sizeof(pgd_t))); 1305 swapper_low_pmd_dir + (shift / sizeof(pgd_t)));
1488 1306
1489 swapper_pgd_zero = pgd_val(swapper_pg_dir[0]);
1490
1491 inherit_prom_mappings(); 1307 inherit_prom_mappings();
1492 1308
1493 /* Ok, we can use our TLB miss and window trap handlers safely. 1309 /* Ok, we can use our TLB miss and window trap handlers safely. */
1494 * We need to do a quick peek here to see if we are on StarFire 1310 setup_tba();
1495 * or not, so setup_tba can setup the IRQ globals correctly (it
1496 * needs to get the hard smp processor id correctly).
1497 */
1498 {
1499 extern void setup_tba(int);
1500 setup_tba(this_is_starfire);
1501 }
1502
1503 inherit_locked_prom_mappings(1);
1504 1311
1505 __flush_tlb_all(); 1312 __flush_tlb_all();
1506 1313
1314 if (tlb_type == hypervisor)
1315 sun4v_ktsb_register();
1316
1507 /* Setup bootmem... */ 1317 /* Setup bootmem... */
1508 pages_avail = 0; 1318 pages_avail = 0;
1509 last_valid_pfn = end_pfn = bootmem_init(&pages_avail); 1319 last_valid_pfn = end_pfn = bootmem_init(&pages_avail, phys_base);
1320
1321 max_mapnr = last_valid_pfn;
1510 1322
1511#ifdef CONFIG_DEBUG_PAGEALLOC
1512 kernel_physical_mapping_init(); 1323 kernel_physical_mapping_init();
1513#endif
1514 1324
1515 { 1325 {
1516 unsigned long zones_size[MAX_NR_ZONES]; 1326 unsigned long zones_size[MAX_NR_ZONES];
1517 unsigned long zholes_size[MAX_NR_ZONES]; 1327 unsigned long zholes_size[MAX_NR_ZONES];
1518 unsigned long npages;
1519 int znum; 1328 int znum;
1520 1329
1521 for (znum = 0; znum < MAX_NR_ZONES; znum++) 1330 for (znum = 0; znum < MAX_NR_ZONES; znum++)
1522 zones_size[znum] = zholes_size[znum] = 0; 1331 zones_size[znum] = zholes_size[znum] = 0;
1523 1332
1524 npages = end_pfn - pfn_base; 1333 zones_size[ZONE_DMA] = end_pfn;
1525 zones_size[ZONE_DMA] = npages; 1334 zholes_size[ZONE_DMA] = end_pfn - pages_avail;
1526 zholes_size[ZONE_DMA] = npages - pages_avail;
1527 1335
1528 free_area_init_node(0, &contig_page_data, zones_size, 1336 free_area_init_node(0, &contig_page_data, zones_size,
1529 phys_base >> PAGE_SHIFT, zholes_size); 1337 __pa(PAGE_OFFSET) >> PAGE_SHIFT,
1338 zholes_size);
1530 } 1339 }
1531 1340
1532 device_scan(); 1341 device_scan();
@@ -1596,7 +1405,6 @@ void __init mem_init(void)
1596 1405
1597 taint_real_pages(); 1406 taint_real_pages();
1598 1407
1599 max_mapnr = last_valid_pfn - pfn_base;
1600 high_memory = __va(last_valid_pfn << PAGE_SHIFT); 1408 high_memory = __va(last_valid_pfn << PAGE_SHIFT);
1601 1409
1602#ifdef CONFIG_DEBUG_BOOTMEM 1410#ifdef CONFIG_DEBUG_BOOTMEM
@@ -1676,3 +1484,342 @@ void free_initrd_mem(unsigned long start, unsigned long end)
1676 } 1484 }
1677} 1485}
1678#endif 1486#endif
1487
1488#define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U)
1489#define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V)
1490#define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
1491#define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
1492#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
1493#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
1494
1495pgprot_t PAGE_KERNEL __read_mostly;
1496EXPORT_SYMBOL(PAGE_KERNEL);
1497
1498pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
1499pgprot_t PAGE_COPY __read_mostly;
1500
1501pgprot_t PAGE_SHARED __read_mostly;
1502EXPORT_SYMBOL(PAGE_SHARED);
1503
1504pgprot_t PAGE_EXEC __read_mostly;
1505unsigned long pg_iobits __read_mostly;
1506
1507unsigned long _PAGE_IE __read_mostly;
1508
1509unsigned long _PAGE_E __read_mostly;
1510EXPORT_SYMBOL(_PAGE_E);
1511
1512unsigned long _PAGE_CACHE __read_mostly;
1513EXPORT_SYMBOL(_PAGE_CACHE);
1514
1515static void prot_init_common(unsigned long page_none,
1516 unsigned long page_shared,
1517 unsigned long page_copy,
1518 unsigned long page_readonly,
1519 unsigned long page_exec_bit)
1520{
1521 PAGE_COPY = __pgprot(page_copy);
1522 PAGE_SHARED = __pgprot(page_shared);
1523
1524 protection_map[0x0] = __pgprot(page_none);
1525 protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
1526 protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
1527 protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
1528 protection_map[0x4] = __pgprot(page_readonly);
1529 protection_map[0x5] = __pgprot(page_readonly);
1530 protection_map[0x6] = __pgprot(page_copy);
1531 protection_map[0x7] = __pgprot(page_copy);
1532 protection_map[0x8] = __pgprot(page_none);
1533 protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
1534 protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
1535 protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
1536 protection_map[0xc] = __pgprot(page_readonly);
1537 protection_map[0xd] = __pgprot(page_readonly);
1538 protection_map[0xe] = __pgprot(page_shared);
1539 protection_map[0xf] = __pgprot(page_shared);
1540}
1541
1542static void __init sun4u_pgprot_init(void)
1543{
1544 unsigned long page_none, page_shared, page_copy, page_readonly;
1545 unsigned long page_exec_bit;
1546
1547 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
1548 _PAGE_CACHE_4U | _PAGE_P_4U |
1549 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
1550 _PAGE_EXEC_4U);
1551 PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
1552 _PAGE_CACHE_4U | _PAGE_P_4U |
1553 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
1554 _PAGE_EXEC_4U | _PAGE_L_4U);
1555 PAGE_EXEC = __pgprot(_PAGE_EXEC_4U);
1556
1557 _PAGE_IE = _PAGE_IE_4U;
1558 _PAGE_E = _PAGE_E_4U;
1559 _PAGE_CACHE = _PAGE_CACHE_4U;
1560
1561 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
1562 __ACCESS_BITS_4U | _PAGE_E_4U);
1563
1564 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
1565 0xfffff80000000000;
1566 kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
1567 _PAGE_P_4U | _PAGE_W_4U);
1568
1569 /* XXX Should use 256MB on Panther. XXX */
1570 kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
1571
1572 _PAGE_SZBITS = _PAGE_SZBITS_4U;
1573 _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
1574 _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
1575 _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);
1576
1577
1578 page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
1579 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
1580 __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
1581 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
1582 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
1583 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
1584 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
1585
1586 page_exec_bit = _PAGE_EXEC_4U;
1587
1588 prot_init_common(page_none, page_shared, page_copy, page_readonly,
1589 page_exec_bit);
1590}
1591
1592static void __init sun4v_pgprot_init(void)
1593{
1594 unsigned long page_none, page_shared, page_copy, page_readonly;
1595 unsigned long page_exec_bit;
1596
1597 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
1598 _PAGE_CACHE_4V | _PAGE_P_4V |
1599 __ACCESS_BITS_4V | __DIRTY_BITS_4V |
1600 _PAGE_EXEC_4V);
1601 PAGE_KERNEL_LOCKED = PAGE_KERNEL;
1602 PAGE_EXEC = __pgprot(_PAGE_EXEC_4V);
1603
1604 _PAGE_IE = _PAGE_IE_4V;
1605 _PAGE_E = _PAGE_E_4V;
1606 _PAGE_CACHE = _PAGE_CACHE_4V;
1607
1608 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
1609 0xfffff80000000000;
1610 kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
1611 _PAGE_P_4V | _PAGE_W_4V);
1612
1613 kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
1614 0xfffff80000000000;
1615 kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
1616 _PAGE_P_4V | _PAGE_W_4V);
1617
1618 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
1619 __ACCESS_BITS_4V | _PAGE_E_4V);
1620
1621 _PAGE_SZBITS = _PAGE_SZBITS_4V;
1622 _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
1623 _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
1624 _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
1625 _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);
1626
1627 page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
1628 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
1629 __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
1630 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
1631 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
1632 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
1633 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
1634
1635 page_exec_bit = _PAGE_EXEC_4V;
1636
1637 prot_init_common(page_none, page_shared, page_copy, page_readonly,
1638 page_exec_bit);
1639}
1640
1641unsigned long pte_sz_bits(unsigned long sz)
1642{
1643 if (tlb_type == hypervisor) {
1644 switch (sz) {
1645 case 8 * 1024:
1646 default:
1647 return _PAGE_SZ8K_4V;
1648 case 64 * 1024:
1649 return _PAGE_SZ64K_4V;
1650 case 512 * 1024:
1651 return _PAGE_SZ512K_4V;
1652 case 4 * 1024 * 1024:
1653 return _PAGE_SZ4MB_4V;
1654 };
1655 } else {
1656 switch (sz) {
1657 case 8 * 1024:
1658 default:
1659 return _PAGE_SZ8K_4U;
1660 case 64 * 1024:
1661 return _PAGE_SZ64K_4U;
1662 case 512 * 1024:
1663 return _PAGE_SZ512K_4U;
1664 case 4 * 1024 * 1024:
1665 return _PAGE_SZ4MB_4U;
1666 };
1667 }
1668}
1669
1670pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
1671{
1672 pte_t pte;
1673
1674 pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
1675 pte_val(pte) |= (((unsigned long)space) << 32);
1676 pte_val(pte) |= pte_sz_bits(page_size);
1677
1678 return pte;
1679}
1680
1681static unsigned long kern_large_tte(unsigned long paddr)
1682{
1683 unsigned long val;
1684
1685 val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
1686 _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
1687 _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
1688 if (tlb_type == hypervisor)
1689 val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
1690 _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
1691 _PAGE_EXEC_4V | _PAGE_W_4V);
1692
1693 return val | paddr;
1694}
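/* Example only: kern_large_tte(0x40000000UL) returns a valid,
 * privileged, cacheable, writable, executable 4MB TTE for that
 * physical address; the sun4u encoding also sets _PAGE_L_4U so the
 * entry is locked into the TLB, while the sun4v encoding simply has
 * no lock bit to set.
 */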
1695
1696/*
1697 * Translate a PROM virtual address into a physical address using the mappings we captured at boot time.
1698 * The second parameter is only set from prom_callback() invocations.
1699 */
1700unsigned long prom_virt_to_phys(unsigned long promva, int *error)
1701{
1702 unsigned long mask;
1703 int i;
1704
1705 mask = _PAGE_PADDR_4U;
1706 if (tlb_type == hypervisor)
1707 mask = _PAGE_PADDR_4V;
1708
1709 for (i = 0; i < prom_trans_ents; i++) {
1710 struct linux_prom_translation *p = &prom_trans[i];
1711
1712 if (promva >= p->virt &&
1713 promva < (p->virt + p->size)) {
1714 unsigned long base = p->data & mask;
1715
1716 if (error)
1717 *error = 0;
1718 return base + (promva & (8192 - 1));
1719 }
1720 }
1721 if (error)
1722 *error = 1;
1723 return 0UL;
1724}
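/* Worked example (hypothetical translation entry, not from the patch):
 * if prom_trans[] records an 8K mapping with virt == 0xf0004000 and a
 * data word whose physical-address bits are 0x7c040000, then
 *
 *	paddr = prom_virt_to_phys(0xf0004010, &err);
 *
 * finds that entry, sets err to 0 and returns
 * 0x7c040000 + (0xf0004010 & 8191) == 0x7c040010.
 */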
1725
1726 /* XXX We should kill off this ugly thing at some point. XXX */
1727unsigned long sun4u_get_pte(unsigned long addr)
1728{
1729 pgd_t *pgdp;
1730 pud_t *pudp;
1731 pmd_t *pmdp;
1732 pte_t *ptep;
1733 unsigned long mask = _PAGE_PADDR_4U;
1734
1735 if (tlb_type == hypervisor)
1736 mask = _PAGE_PADDR_4V;
1737
1738 if (addr >= PAGE_OFFSET)
1739 return addr & mask;
1740
1741 if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS))
1742 return prom_virt_to_phys(addr, NULL);
1743
1744 pgdp = pgd_offset_k(addr);
1745 pudp = pud_offset(pgdp, addr);
1746 pmdp = pmd_offset(pudp, addr);
1747 ptep = pte_offset_kernel(pmdp, addr);
1748
1749 return pte_val(*ptep) & mask;
1750}
1751
1752/* If not locked, zap it. */
1753void __flush_tlb_all(void)
1754{
1755 unsigned long pstate;
1756 int i;
1757
1758 __asm__ __volatile__("flushw\n\t"
1759 "rdpr %%pstate, %0\n\t"
1760 "wrpr %0, %1, %%pstate"
1761 : "=r" (pstate)
1762 : "i" (PSTATE_IE));
1763 if (tlb_type == spitfire) {
1764 for (i = 0; i < 64; i++) {
1765 /* Spitfire Errata #32 workaround */
1766 /* NOTE: Always runs on spitfire, so no
1767 * cheetah+ page size encodings.
1768 */
1769 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
1770 "flush %%g6"
1771 : /* No outputs */
1772 : "r" (0),
1773 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
1774
1775 if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
1776 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
1777 "membar #Sync"
1778 : /* no outputs */
1779 : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
1780 spitfire_put_dtlb_data(i, 0x0UL);
1781 }
1782
1783 /* Spitfire Errata #32 workaround */
1784 /* NOTE: Always runs on spitfire, so no
1785 * cheetah+ page size encodings.
1786 */
1787 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
1788 "flush %%g6"
1789 : /* No outputs */
1790 : "r" (0),
1791 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
1792
1793 if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
1794 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
1795 "membar #Sync"
1796 : /* no outputs */
1797 : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
1798 spitfire_put_itlb_data(i, 0x0UL);
1799 }
1800 }
1801 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1802 cheetah_flush_dtlb_all();
1803 cheetah_flush_itlb_all();
1804 }
1805 __asm__ __volatile__("wrpr %0, 0, %%pstate"
1806 : : "r" (pstate));
1807}
1808
1809#ifdef CONFIG_MEMORY_HOTPLUG
1810
1811void online_page(struct page *page)
1812{
1813 ClearPageReserved(page);
1814 set_page_count(page, 0);
1815 free_cold_page(page);
1816 totalram_pages++;
1817 num_physpages++;
1818}
1819
1820int remove_memory(u64 start, u64 size)
1821{
1822 return -EINVAL;
1823}
1824
1825#endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/arch/sparc64/mm/tlb.c b/arch/sparc64/mm/tlb.c
index 8b104be4662b..a079cf42505e 100644
--- a/arch/sparc64/mm/tlb.c
+++ b/arch/sparc64/mm/tlb.c
@@ -25,6 +25,8 @@ void flush_tlb_pending(void)
25 struct mmu_gather *mp = &__get_cpu_var(mmu_gathers); 25 struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
26 26
27 if (mp->tlb_nr) { 27 if (mp->tlb_nr) {
28 flush_tsb_user(mp);
29
28 if (CTX_VALID(mp->mm->context)) { 30 if (CTX_VALID(mp->mm->context)) {
29#ifdef CONFIG_SMP 31#ifdef CONFIG_SMP
30 smp_flush_tlb_pending(mp->mm, mp->tlb_nr, 32 smp_flush_tlb_pending(mp->mm, mp->tlb_nr,
@@ -47,7 +49,8 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t
47 if (pte_exec(orig)) 49 if (pte_exec(orig))
48 vaddr |= 0x1UL; 50 vaddr |= 0x1UL;
49 51
50 if (pte_dirty(orig)) { 52 if (tlb_type != hypervisor &&
53 pte_dirty(orig)) {
51 unsigned long paddr, pfn = pte_pfn(orig); 54 unsigned long paddr, pfn = pte_pfn(orig);
52 struct address_space *mapping; 55 struct address_space *mapping;
53 struct page *page; 56 struct page *page;
@@ -89,62 +92,3 @@ no_cache_flush:
89 if (nr >= TLB_BATCH_NR) 92 if (nr >= TLB_BATCH_NR)
90 flush_tlb_pending(); 93 flush_tlb_pending();
91} 94}
92
93void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
94{
95 struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
96 unsigned long nr = mp->tlb_nr;
97 long s = start, e = end, vpte_base;
98
99 if (mp->fullmm)
100 return;
101
102 /* If start is greater than end, that is a real problem. */
103 BUG_ON(start > end);
104
105 /* However, straddling the VA space hole is quite normal. */
106 s &= PMD_MASK;
107 e = (e + PMD_SIZE - 1) & PMD_MASK;
108
109 vpte_base = (tlb_type == spitfire ?
110 VPTE_BASE_SPITFIRE :
111 VPTE_BASE_CHEETAH);
112
113 if (unlikely(nr != 0 && mm != mp->mm)) {
114 flush_tlb_pending();
115 nr = 0;
116 }
117
118 if (nr == 0)
119 mp->mm = mm;
120
121 start = vpte_base + (s >> (PAGE_SHIFT - 3));
122 end = vpte_base + (e >> (PAGE_SHIFT - 3));
123
124 /* If the request straddles the VA space hole, we
125 * need to swap start and end. The reason this
126 * occurs is that "vpte_base" is the center of
127 * the linear page table mapping area. Thus,
128 * high addresses with the sign bit set map to
129 * addresses below vpte_base and non-sign bit
130 * addresses map to addresses above vpte_base.
131 */
132 if (end < start) {
133 unsigned long tmp = start;
134
135 start = end;
136 end = tmp;
137 }
138
139 while (start < end) {
140 mp->vaddrs[nr] = start;
141 mp->tlb_nr = ++nr;
142 if (nr >= TLB_BATCH_NR) {
143 flush_tlb_pending();
144 nr = 0;
145 }
146 start += PAGE_SIZE;
147 }
148 if (nr)
149 flush_tlb_pending();
150}
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
new file mode 100644
index 000000000000..b2064e2a44d6
--- /dev/null
+++ b/arch/sparc64/mm/tsb.c
@@ -0,0 +1,440 @@
1/* arch/sparc64/mm/tsb.c
2 *
3 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
4 */
5
6#include <linux/kernel.h>
7#include <asm/system.h>
8#include <asm/page.h>
9#include <asm/tlbflush.h>
10#include <asm/tlb.h>
11#include <asm/mmu_context.h>
12#include <asm/pgtable.h>
13#include <asm/tsb.h>
14#include <asm/oplib.h>
15
16extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
17
18static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries)
19{
20 vaddr >>= PAGE_SHIFT;
21 return vaddr & (nentries - 1);
22}
23
24static inline int tag_compare(unsigned long tag, unsigned long vaddr)
25{
26 return (tag == (vaddr >> 22));
27}
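/* Illustrative numbers (not part of the original code): with 8K pages
 * (PAGE_SHIFT == 13) and the minimum 8K TSB of 512 sixteen-byte
 * entries, a virtual address such as 0xedc000 gives
 *
 *	tsb_hash(0xedc000, 512) == (0xedc000 >> 13) & 511 == 0x16e
 *	tag                     ==  0xedc000 >> 22        == 0x3
 *
 * so a TSB miss handler and flush_tsb_user() below land on the same
 * slot and compare the same tag for a given address.
 */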
28
29/* TSB flushes need only occur on the processor initiating the address
30 * space modification, not on each cpu the address space has run on.
31 * Only the TLB flush needs that treatment.
32 */
33
34void flush_tsb_kernel_range(unsigned long start, unsigned long end)
35{
36 unsigned long v;
37
38 for (v = start; v < end; v += PAGE_SIZE) {
39 unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
40 struct tsb *ent = &swapper_tsb[hash];
41
42 if (tag_compare(ent->tag, v)) {
43 ent->tag = (1UL << TSB_TAG_INVALID_BIT);
44 membar_storeload_storestore();
45 }
46 }
47}
48
49void flush_tsb_user(struct mmu_gather *mp)
50{
51 struct mm_struct *mm = mp->mm;
52 unsigned long nentries, base, flags;
53 struct tsb *tsb;
54 int i;
55
56 spin_lock_irqsave(&mm->context.lock, flags);
57
58 tsb = mm->context.tsb;
59 nentries = mm->context.tsb_nentries;
60
61 if (tlb_type == cheetah_plus || tlb_type == hypervisor)
62 base = __pa(tsb);
63 else
64 base = (unsigned long) tsb;
65
66 for (i = 0; i < mp->tlb_nr; i++) {
67 unsigned long v = mp->vaddrs[i];
68 unsigned long tag, ent, hash;
69
70 v &= ~0x1UL;
71
72 hash = tsb_hash(v, nentries);
73 ent = base + (hash * sizeof(struct tsb));
74 tag = (v >> 22UL);
75
76 tsb_flush(ent, tag);
77 }
78
79 spin_unlock_irqrestore(&mm->context.lock, flags);
80}
81
82static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
83{
84 unsigned long tsb_reg, base, tsb_paddr;
85 unsigned long page_sz, tte;
86
87 mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb);
88
89 base = TSBMAP_BASE;
90 tte = pgprot_val(PAGE_KERNEL_LOCKED);
91 tsb_paddr = __pa(mm->context.tsb);
92 BUG_ON(tsb_paddr & (tsb_bytes - 1UL));
93
94 /* Use the smallest page size that can map the whole TSB
95 * in one TLB entry.
96 */
97 switch (tsb_bytes) {
98 case 8192 << 0:
99 tsb_reg = 0x0UL;
100#ifdef DCACHE_ALIASING_POSSIBLE
101 base += (tsb_paddr & 8192);
102#endif
103 page_sz = 8192;
104 break;
105
106 case 8192 << 1:
107 tsb_reg = 0x1UL;
108 page_sz = 64 * 1024;
109 break;
110
111 case 8192 << 2:
112 tsb_reg = 0x2UL;
113 page_sz = 64 * 1024;
114 break;
115
116 case 8192 << 3:
117 tsb_reg = 0x3UL;
118 page_sz = 64 * 1024;
119 break;
120
121 case 8192 << 4:
122 tsb_reg = 0x4UL;
123 page_sz = 512 * 1024;
124 break;
125
126 case 8192 << 5:
127 tsb_reg = 0x5UL;
128 page_sz = 512 * 1024;
129 break;
130
131 case 8192 << 6:
132 tsb_reg = 0x6UL;
133 page_sz = 512 * 1024;
134 break;
135
136 case 8192 << 7:
137 tsb_reg = 0x7UL;
138 page_sz = 4 * 1024 * 1024;
139 break;
140
141 default:
142 BUG();
143 };
144 tte |= pte_sz_bits(page_sz);
145
146 if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
147 /* Physical mapping, no locked TLB entry for TSB. */
148 tsb_reg |= tsb_paddr;
149
150 mm->context.tsb_reg_val = tsb_reg;
151 mm->context.tsb_map_vaddr = 0;
152 mm->context.tsb_map_pte = 0;
153 } else {
154 tsb_reg |= base;
155 tsb_reg |= (tsb_paddr & (page_sz - 1UL));
156 tte |= (tsb_paddr & ~(page_sz - 1UL));
157
158 mm->context.tsb_reg_val = tsb_reg;
159 mm->context.tsb_map_vaddr = base;
160 mm->context.tsb_map_pte = tte;
161 }
162
163 /* Setup the Hypervisor TSB descriptor. */
164 if (tlb_type == hypervisor) {
165 struct hv_tsb_descr *hp = &mm->context.tsb_descr;
166
167 switch (PAGE_SIZE) {
168 case 8192:
169 default:
170 hp->pgsz_idx = HV_PGSZ_IDX_8K;
171 break;
172
173 case 64 * 1024:
174 hp->pgsz_idx = HV_PGSZ_IDX_64K;
175 break;
176
177 case 512 * 1024:
178 hp->pgsz_idx = HV_PGSZ_IDX_512K;
179 break;
180
181 case 4 * 1024 * 1024:
182 hp->pgsz_idx = HV_PGSZ_IDX_4MB;
183 break;
184 };
185 hp->assoc = 1;
186 hp->num_ttes = tsb_bytes / 16;
187 hp->ctx_idx = 0;
188 switch (PAGE_SIZE) {
189 case 8192:
190 default:
191 hp->pgsz_mask = HV_PGSZ_MASK_8K;
192 break;
193
194 case 64 * 1024:
195 hp->pgsz_mask = HV_PGSZ_MASK_64K;
196 break;
197
198 case 512 * 1024:
199 hp->pgsz_mask = HV_PGSZ_MASK_512K;
200 break;
201
202 case 4 * 1024 * 1024:
203 hp->pgsz_mask = HV_PGSZ_MASK_4MB;
204 break;
205 };
206 hp->tsb_base = tsb_paddr;
207 hp->resv = 0;
208 }
209}
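/* Example encoding (illustrative values): a 64K TSB (8192 << 3) holds
 * 4096 sixteen-byte entries and gets size field 0x3, so on cheetah_plus
 * or sun4v the register value is simply
 *
 *	mm->context.tsb_reg_val = __pa(tsb) | 0x3UL;
 *
 * Keeping the size index in the low three bits is what lets tsb_grow()
 * and destroy_context() recover the backing kmem cache with
 * (tsb_reg_val & 0x7UL).
 */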
210
211static kmem_cache_t *tsb_caches[8] __read_mostly;
212
213static const char *tsb_cache_names[8] = {
214 "tsb_8KB",
215 "tsb_16KB",
216 "tsb_32KB",
217 "tsb_64KB",
218 "tsb_128KB",
219 "tsb_256KB",
220 "tsb_512KB",
221 "tsb_1MB",
222};
223
224void __init tsb_cache_init(void)
225{
226 unsigned long i;
227
228 for (i = 0; i < 8; i++) {
229 unsigned long size = 8192 << i;
230 const char *name = tsb_cache_names[i];
231
232 tsb_caches[i] = kmem_cache_create(name,
233 size, size,
234 SLAB_HWCACHE_ALIGN |
235 SLAB_MUST_HWCACHE_ALIGN,
236 NULL, NULL);
237 if (!tsb_caches[i]) {
238 prom_printf("Could not create %s cache\n", name);
239 prom_halt();
240 }
241 }
242}
243
244/* When the RSS of an address space exceeds mm->context.tsb_rss_limit,
245 * do_sparc64_fault() invokes this routine to try and grow the TSB.
246 *
247 * When we reach the maximum TSB size supported, we stick ~0UL into
248 * mm->context.tsb_rss_limit so the grow checks in update_mmu_cache()
249 * will not trigger any longer.
250 *
251 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
252 * of two. The TSB must be aligned to its size, so f.e. a 512K TSB
253 * must be 512K aligned. It also must be physically contiguous, so we
254 * cannot use vmalloc().
255 *
256 * The idea here is to grow the TSB when the RSS of the process approaches
257 * the number of entries that the current TSB can hold at once. Currently,
258 * we trigger when the RSS hits 3/4 of the TSB capacity.
259 */
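/* Concrete numbers for the policy above (illustrative only): the
 * initial 8K TSB holds 8192 / sizeof(struct tsb) == 512 translations,
 * so tsb_rss_limit starts out at 384.  Once get_mm_rss() passes that,
 * do_sparc64_fault() calls back in here, the sizing loop below picks
 * 16K (1024 entries, new limit 768), and so on up to the 1MB maximum,
 * at which point the limit is pinned to ~0UL and growth stops.
 */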
260void tsb_grow(struct mm_struct *mm, unsigned long rss)
261{
262 unsigned long max_tsb_size = 1 * 1024 * 1024;
263 unsigned long new_size, old_size, flags;
264 struct tsb *old_tsb, *new_tsb;
265 unsigned long new_cache_index, old_cache_index;
266 unsigned long new_rss_limit;
267 gfp_t gfp_flags;
268
269 if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
270 max_tsb_size = (PAGE_SIZE << MAX_ORDER);
271
272 new_cache_index = 0;
273 for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) {
274 unsigned long n_entries = new_size / sizeof(struct tsb);
275
276 n_entries = (n_entries * 3) / 4;
277 if (n_entries > rss)
278 break;
279
280 new_cache_index++;
281 }
282
283 if (new_size == max_tsb_size)
284 new_rss_limit = ~0UL;
285 else
286 new_rss_limit = ((new_size / sizeof(struct tsb)) * 3) / 4;
287
288retry_tsb_alloc:
289 gfp_flags = GFP_KERNEL;
290 if (new_size > (PAGE_SIZE * 2))
291 gfp_flags = __GFP_NOWARN | __GFP_NORETRY;
292
293 new_tsb = kmem_cache_alloc(tsb_caches[new_cache_index], gfp_flags);
294 if (unlikely(!new_tsb)) {
295 /* Not being able to fork due to a high-order TSB
296 * allocation failure is very bad behavior. Just back
297 * down to a 0-order allocation and force no TSB
298 * growing for this address space.
299 */
300 if (mm->context.tsb == NULL && new_cache_index > 0) {
301 new_cache_index = 0;
302 new_size = 8192;
303 new_rss_limit = ~0UL;
304 goto retry_tsb_alloc;
305 }
306
307 /* If we failed on a TSB grow, we are under serious
308 * memory pressure so don't try to grow any more.
309 */
310 if (mm->context.tsb != NULL)
311 mm->context.tsb_rss_limit = ~0UL;
312 return;
313 }
314
315 /* Mark all tags as invalid. */
316 tsb_init(new_tsb, new_size);
317
318 /* Ok, we are about to commit the changes. If we are
319 * growing an existing TSB the locking is very tricky,
320 * so WATCH OUT!
321 *
322 * We have to hold mm->context.lock while committing to the
323 * new TSB, this synchronizes us with processors in
324 * flush_tsb_user() and switch_mm() for this address space.
325 *
326 * But even with that lock held, processors run asynchronously
327 * accessing the old TSB via TLB miss handling. This is OK
328 * because those actions are just propagating state from the
329 * Linux page tables into the TSB, page table mappings are not
330 * being changed. If a real fault occurs, the processor will
331 * synchronize with us when it hits flush_tsb_user(), this is
332 * also true for the case where vmscan is modifying the page
333 * tables. The only thing we need to be careful with is to
334 * skip any locked TSB entries during copy_tsb().
335 *
336 * When we finish committing to the new TSB, we have to drop
337 * the lock and ask all other cpus running this address space
338 * to run tsb_context_switch() to see the new TSB table.
339 */
340 spin_lock_irqsave(&mm->context.lock, flags);
341
342 old_tsb = mm->context.tsb;
343 old_cache_index = (mm->context.tsb_reg_val & 0x7UL);
344 old_size = mm->context.tsb_nentries * sizeof(struct tsb);
345
346
347 /* Handle multiple threads trying to grow the TSB at the same time.
348 * One will get in here first, and bump the size and the RSS limit.
349 * The others will get in here next and hit this check.
350 */
351 if (unlikely(old_tsb && (rss < mm->context.tsb_rss_limit))) {
352 spin_unlock_irqrestore(&mm->context.lock, flags);
353
354 kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
355 return;
356 }
357
358 mm->context.tsb_rss_limit = new_rss_limit;
359
360 if (old_tsb) {
361 extern void copy_tsb(unsigned long old_tsb_base,
362 unsigned long old_tsb_size,
363 unsigned long new_tsb_base,
364 unsigned long new_tsb_size);
365 unsigned long old_tsb_base = (unsigned long) old_tsb;
366 unsigned long new_tsb_base = (unsigned long) new_tsb;
367
368 if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
369 old_tsb_base = __pa(old_tsb_base);
370 new_tsb_base = __pa(new_tsb_base);
371 }
372 copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
373 }
374
375 mm->context.tsb = new_tsb;
376 setup_tsb_params(mm, new_size);
377
378 spin_unlock_irqrestore(&mm->context.lock, flags);
379
380 /* If old_tsb is NULL, we're being invoked for the first time
381 * from init_new_context().
382 */
383 if (old_tsb) {
384 /* Reload it on the local cpu. */
385 tsb_context_switch(mm);
386
387 /* Now force other processors to do the same. */
388 smp_tsb_sync(mm);
389
390 /* Now it is safe to free the old tsb. */
391 kmem_cache_free(tsb_caches[old_cache_index], old_tsb);
392 }
393}
394
395int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
396{
397 spin_lock_init(&mm->context.lock);
398
399 mm->context.sparc64_ctx_val = 0UL;
400
401 /* copy_mm() copies over the parent's mm_struct before calling
402 * us, so we need to zero out the TSB pointer or else tsb_grow()
403 * will be confused and think there is an older TSB to free up.
404 */
405 mm->context.tsb = NULL;
406
407 /* If this is fork, inherit the parent's TSB size. We would
408 * grow it to that size on the first page fault anyways.
409 */
410 tsb_grow(mm, get_mm_rss(mm));
411
412 if (unlikely(!mm->context.tsb))
413 return -ENOMEM;
414
415 return 0;
416}
417
418void destroy_context(struct mm_struct *mm)
419{
420 unsigned long flags, cache_index;
421
422 cache_index = (mm->context.tsb_reg_val & 0x7UL);
423 kmem_cache_free(tsb_caches[cache_index], mm->context.tsb);
424
425 /* We can remove these later, but for now it's useful
426 * to catch any bogus post-destroy_context() references
427 * to the TSB.
428 */
429 mm->context.tsb = NULL;
430 mm->context.tsb_reg_val = 0UL;
431
432 spin_lock_irqsave(&ctx_alloc_lock, flags);
433
434 if (CTX_VALID(mm->context)) {
435 unsigned long nr = CTX_NRBITS(mm->context);
436 mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
437 }
438
439 spin_unlock_irqrestore(&ctx_alloc_lock, flags);
440}
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
index e4c9151fa116..f8479fad4047 100644
--- a/arch/sparc64/mm/ultra.S
+++ b/arch/sparc64/mm/ultra.S
@@ -15,6 +15,7 @@
15#include <asm/head.h> 15#include <asm/head.h>
16#include <asm/thread_info.h> 16#include <asm/thread_info.h>
17#include <asm/cacheflush.h> 17#include <asm/cacheflush.h>
18#include <asm/hypervisor.h>
18 19
19 /* Basically, most of the Spitfire vs. Cheetah madness 20 /* Basically, most of the Spitfire vs. Cheetah madness
20 * has to do with the fact that Cheetah does not support 21 * has to do with the fact that Cheetah does not support
@@ -29,16 +30,18 @@
29 .text 30 .text
30 .align 32 31 .align 32
31 .globl __flush_tlb_mm 32 .globl __flush_tlb_mm
32__flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */ 33__flush_tlb_mm: /* 18 insns */
34 /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
33 ldxa [%o1] ASI_DMMU, %g2 35 ldxa [%o1] ASI_DMMU, %g2
34 cmp %g2, %o0 36 cmp %g2, %o0
35 bne,pn %icc, __spitfire_flush_tlb_mm_slow 37 bne,pn %icc, __spitfire_flush_tlb_mm_slow
36 mov 0x50, %g3 38 mov 0x50, %g3
37 stxa %g0, [%g3] ASI_DMMU_DEMAP 39 stxa %g0, [%g3] ASI_DMMU_DEMAP
38 stxa %g0, [%g3] ASI_IMMU_DEMAP 40 stxa %g0, [%g3] ASI_IMMU_DEMAP
41 sethi %hi(KERNBASE), %g3
42 flush %g3
39 retl 43 retl
40 flush %g6 44 nop
41 nop
42 nop 45 nop
43 nop 46 nop
44 nop 47 nop
@@ -51,7 +54,7 @@ __flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
51 54
52 .align 32 55 .align 32
53 .globl __flush_tlb_pending 56 .globl __flush_tlb_pending
54__flush_tlb_pending: 57__flush_tlb_pending: /* 26 insns */
55 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ 58 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
56 rdpr %pstate, %g7 59 rdpr %pstate, %g7
57 sllx %o1, 3, %o1 60 sllx %o1, 3, %o1
@@ -72,7 +75,8 @@ __flush_tlb_pending:
72 brnz,pt %o1, 1b 75 brnz,pt %o1, 1b
73 nop 76 nop
74 stxa %g2, [%o4] ASI_DMMU 77 stxa %g2, [%o4] ASI_DMMU
75 flush %g6 78 sethi %hi(KERNBASE), %o4
79 flush %o4
76 retl 80 retl
77 wrpr %g7, 0x0, %pstate 81 wrpr %g7, 0x0, %pstate
78 nop 82 nop
@@ -82,7 +86,8 @@ __flush_tlb_pending:
82 86
83 .align 32 87 .align 32
84 .globl __flush_tlb_kernel_range 88 .globl __flush_tlb_kernel_range
85__flush_tlb_kernel_range: /* %o0=start, %o1=end */ 89__flush_tlb_kernel_range: /* 16 insns */
90 /* %o0=start, %o1=end */
86 cmp %o0, %o1 91 cmp %o0, %o1
87 be,pn %xcc, 2f 92 be,pn %xcc, 2f
88 sethi %hi(PAGE_SIZE), %o4 93 sethi %hi(PAGE_SIZE), %o4
@@ -94,8 +99,11 @@ __flush_tlb_kernel_range: /* %o0=start, %o1=end */
94 membar #Sync 99 membar #Sync
95 brnz,pt %o3, 1b 100 brnz,pt %o3, 1b
96 sub %o3, %o4, %o3 101 sub %o3, %o4, %o3
972: retl 1022: sethi %hi(KERNBASE), %o3
98 flush %g6 103 flush %o3
104 retl
105 nop
106 nop
99 107
100__spitfire_flush_tlb_mm_slow: 108__spitfire_flush_tlb_mm_slow:
101 rdpr %pstate, %g1 109 rdpr %pstate, %g1
@@ -105,7 +113,8 @@ __spitfire_flush_tlb_mm_slow:
105 stxa %g0, [%g3] ASI_IMMU_DEMAP 113 stxa %g0, [%g3] ASI_IMMU_DEMAP
106 flush %g6 114 flush %g6
107 stxa %g2, [%o1] ASI_DMMU 115 stxa %g2, [%o1] ASI_DMMU
108 flush %g6 116 sethi %hi(KERNBASE), %o1
117 flush %o1
109 retl 118 retl
110 wrpr %g1, 0, %pstate 119 wrpr %g1, 0, %pstate
111 120
@@ -181,7 +190,7 @@ __flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */
181 .previous 190 .previous
182 191
183 /* Cheetah specific versions, patched at boot time. */ 192 /* Cheetah specific versions, patched at boot time. */
184__cheetah_flush_tlb_mm: /* 18 insns */ 193__cheetah_flush_tlb_mm: /* 19 insns */
185 rdpr %pstate, %g7 194 rdpr %pstate, %g7
186 andn %g7, PSTATE_IE, %g2 195 andn %g7, PSTATE_IE, %g2
187 wrpr %g2, 0x0, %pstate 196 wrpr %g2, 0x0, %pstate
@@ -196,12 +205,13 @@ __cheetah_flush_tlb_mm: /* 18 insns */
196 stxa %g0, [%g3] ASI_DMMU_DEMAP 205 stxa %g0, [%g3] ASI_DMMU_DEMAP
197 stxa %g0, [%g3] ASI_IMMU_DEMAP 206 stxa %g0, [%g3] ASI_IMMU_DEMAP
198 stxa %g2, [%o2] ASI_DMMU 207 stxa %g2, [%o2] ASI_DMMU
199 flush %g6 208 sethi %hi(KERNBASE), %o2
209 flush %o2
200 wrpr %g0, 0, %tl 210 wrpr %g0, 0, %tl
201 retl 211 retl
202 wrpr %g7, 0x0, %pstate 212 wrpr %g7, 0x0, %pstate
203 213
204__cheetah_flush_tlb_pending: /* 26 insns */ 214__cheetah_flush_tlb_pending: /* 27 insns */
205 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ 215 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
206 rdpr %pstate, %g7 216 rdpr %pstate, %g7
207 sllx %o1, 3, %o1 217 sllx %o1, 3, %o1
@@ -225,7 +235,8 @@ __cheetah_flush_tlb_pending: /* 26 insns */
225 brnz,pt %o1, 1b 235 brnz,pt %o1, 1b
226 nop 236 nop
227 stxa %g2, [%o4] ASI_DMMU 237 stxa %g2, [%o4] ASI_DMMU
228 flush %g6 238 sethi %hi(KERNBASE), %o4
239 flush %o4
229 wrpr %g0, 0, %tl 240 wrpr %g0, 0, %tl
230 retl 241 retl
231 wrpr %g7, 0x0, %pstate 242 wrpr %g7, 0x0, %pstate
@@ -245,7 +256,76 @@ __cheetah_flush_dcache_page: /* 11 insns */
245 nop 256 nop
246#endif /* DCACHE_ALIASING_POSSIBLE */ 257#endif /* DCACHE_ALIASING_POSSIBLE */
247 258
248cheetah_patch_one: 259 /* Hypervisor specific versions, patched at boot time. */
260__hypervisor_tlb_tl0_error:
261 save %sp, -192, %sp
262 mov %i0, %o0
263 call hypervisor_tlbop_error
264 mov %i1, %o1
265 ret
266 restore
267
268__hypervisor_flush_tlb_mm: /* 10 insns */
269 mov %o0, %o2 /* ARG2: mmu context */
270 mov 0, %o0 /* ARG0: CPU lists unimplemented */
271 mov 0, %o1 /* ARG1: CPU lists unimplemented */
272 mov HV_MMU_ALL, %o3 /* ARG3: flags */
273 mov HV_FAST_MMU_DEMAP_CTX, %o5
274 ta HV_FAST_TRAP
275 brnz,pn %o0, __hypervisor_tlb_tl0_error
276 mov HV_FAST_MMU_DEMAP_CTX, %o1
277 retl
278 nop
279
280__hypervisor_flush_tlb_pending: /* 16 insns */
281 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
282 sllx %o1, 3, %g1
283 mov %o2, %g2
284 mov %o0, %g3
2851: sub %g1, (1 << 3), %g1
286 ldx [%g2 + %g1], %o0 /* ARG0: vaddr + IMMU-bit */
287 mov %g3, %o1 /* ARG1: mmu context */
288 mov HV_MMU_ALL, %o2 /* ARG2: flags */
289 srlx %o0, PAGE_SHIFT, %o0
290 sllx %o0, PAGE_SHIFT, %o0
291 ta HV_MMU_UNMAP_ADDR_TRAP
292 brnz,pn %o0, __hypervisor_tlb_tl0_error
293 mov HV_MMU_UNMAP_ADDR_TRAP, %o1
294 brnz,pt %g1, 1b
295 nop
296 retl
297 nop
298
299__hypervisor_flush_tlb_kernel_range: /* 16 insns */
300 /* %o0=start, %o1=end */
301 cmp %o0, %o1
302 be,pn %xcc, 2f
303 sethi %hi(PAGE_SIZE), %g3
304 mov %o0, %g1
305 sub %o1, %g1, %g2
306 sub %g2, %g3, %g2
3071: add %g1, %g2, %o0 /* ARG0: virtual address */
308 mov 0, %o1 /* ARG1: mmu context */
309 mov HV_MMU_ALL, %o2 /* ARG2: flags */
310 ta HV_MMU_UNMAP_ADDR_TRAP
311 brnz,pn %o0, __hypervisor_tlb_tl0_error
312 mov HV_MMU_UNMAP_ADDR_TRAP, %o1
313 brnz,pt %g2, 1b
314 sub %g2, %g3, %g2
3152: retl
316 nop
317
318#ifdef DCACHE_ALIASING_POSSIBLE
319 /* XXX Niagara and friends have an 8K cache, so no aliasing is
320 * XXX possible, but nothing explicit in the Hypervisor API
321 * XXX guarantees this.
322 */
323__hypervisor_flush_dcache_page: /* 2 insns */
324 retl
325 nop
326#endif
327
328tlb_patch_one:
2491: lduw [%o1], %g1 3291: lduw [%o1], %g1
250 stw %g1, [%o0] 330 stw %g1, [%o0]
251 flush %o0 331 flush %o0
@@ -264,22 +344,22 @@ cheetah_patch_cachetlbops:
264 or %o0, %lo(__flush_tlb_mm), %o0 344 or %o0, %lo(__flush_tlb_mm), %o0
265 sethi %hi(__cheetah_flush_tlb_mm), %o1 345 sethi %hi(__cheetah_flush_tlb_mm), %o1
266 or %o1, %lo(__cheetah_flush_tlb_mm), %o1 346 or %o1, %lo(__cheetah_flush_tlb_mm), %o1
267 call cheetah_patch_one 347 call tlb_patch_one
268 mov 18, %o2 348 mov 19, %o2
269 349
270 sethi %hi(__flush_tlb_pending), %o0 350 sethi %hi(__flush_tlb_pending), %o0
271 or %o0, %lo(__flush_tlb_pending), %o0 351 or %o0, %lo(__flush_tlb_pending), %o0
272 sethi %hi(__cheetah_flush_tlb_pending), %o1 352 sethi %hi(__cheetah_flush_tlb_pending), %o1
273 or %o1, %lo(__cheetah_flush_tlb_pending), %o1 353 or %o1, %lo(__cheetah_flush_tlb_pending), %o1
274 call cheetah_patch_one 354 call tlb_patch_one
275 mov 26, %o2 355 mov 27, %o2
276 356
277#ifdef DCACHE_ALIASING_POSSIBLE 357#ifdef DCACHE_ALIASING_POSSIBLE
278 sethi %hi(__flush_dcache_page), %o0 358 sethi %hi(__flush_dcache_page), %o0
279 or %o0, %lo(__flush_dcache_page), %o0 359 or %o0, %lo(__flush_dcache_page), %o0
280 sethi %hi(__cheetah_flush_dcache_page), %o1 360 sethi %hi(__cheetah_flush_dcache_page), %o1
281 or %o1, %lo(__cheetah_flush_dcache_page), %o1 361 or %o1, %lo(__cheetah_flush_dcache_page), %o1
282 call cheetah_patch_one 362 call tlb_patch_one
283 mov 11, %o2 363 mov 11, %o2
284#endif /* DCACHE_ALIASING_POSSIBLE */ 364#endif /* DCACHE_ALIASING_POSSIBLE */
285 365
@@ -295,16 +375,14 @@ cheetah_patch_cachetlbops:
295 * %g1 address arg 1 (tlb page and range flushes) 375 * %g1 address arg 1 (tlb page and range flushes)
296 * %g7 address arg 2 (tlb range flush only) 376 * %g7 address arg 2 (tlb range flush only)
297 * 377 *
298 * %g6 ivector table, don't touch 378 * %g6 scratch 1
299 * %g2 scratch 1 379 * %g2 scratch 2
300 * %g3 scratch 2 380 * %g3 scratch 3
301 * %g4 scratch 3 381 * %g4 scratch 4
302 *
303 * TODO: Make xcall TLB range flushes use the tricks above... -DaveM
304 */ 382 */
305 .align 32 383 .align 32
306 .globl xcall_flush_tlb_mm 384 .globl xcall_flush_tlb_mm
307xcall_flush_tlb_mm: 385xcall_flush_tlb_mm: /* 21 insns */
308 mov PRIMARY_CONTEXT, %g2 386 mov PRIMARY_CONTEXT, %g2
309 ldxa [%g2] ASI_DMMU, %g3 387 ldxa [%g2] ASI_DMMU, %g3
310 srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4 388 srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4
@@ -316,9 +394,19 @@ xcall_flush_tlb_mm:
316 stxa %g0, [%g4] ASI_IMMU_DEMAP 394 stxa %g0, [%g4] ASI_IMMU_DEMAP
317 stxa %g3, [%g2] ASI_DMMU 395 stxa %g3, [%g2] ASI_DMMU
318 retry 396 retry
397 nop
398 nop
399 nop
400 nop
401 nop
402 nop
403 nop
404 nop
405 nop
406 nop
319 407
320 .globl xcall_flush_tlb_pending 408 .globl xcall_flush_tlb_pending
321xcall_flush_tlb_pending: 409xcall_flush_tlb_pending: /* 21 insns */
322 /* %g5=context, %g1=nr, %g7=vaddrs[] */ 410 /* %g5=context, %g1=nr, %g7=vaddrs[] */
323 sllx %g1, 3, %g1 411 sllx %g1, 3, %g1
324 mov PRIMARY_CONTEXT, %g4 412 mov PRIMARY_CONTEXT, %g4
@@ -341,9 +429,10 @@ xcall_flush_tlb_pending:
341 nop 429 nop
342 stxa %g2, [%g4] ASI_DMMU 430 stxa %g2, [%g4] ASI_DMMU
343 retry 431 retry
432 nop
344 433
345 .globl xcall_flush_tlb_kernel_range 434 .globl xcall_flush_tlb_kernel_range
346xcall_flush_tlb_kernel_range: 435xcall_flush_tlb_kernel_range: /* 25 insns */
347 sethi %hi(PAGE_SIZE - 1), %g2 436 sethi %hi(PAGE_SIZE - 1), %g2
348 or %g2, %lo(PAGE_SIZE - 1), %g2 437 or %g2, %lo(PAGE_SIZE - 1), %g2
349 andn %g1, %g2, %g1 438 andn %g1, %g2, %g1
@@ -360,14 +449,30 @@ xcall_flush_tlb_kernel_range:
360 retry 449 retry
361 nop 450 nop
362 nop 451 nop
452 nop
453 nop
454 nop
455 nop
456 nop
457 nop
458 nop
459 nop
460 nop
363 461
364 /* This runs in a very controlled environment, so we do 462 /* This runs in a very controlled environment, so we do
365 * not need to worry about BH races etc. 463 * not need to worry about BH races etc.
366 */ 464 */
367 .globl xcall_sync_tick 465 .globl xcall_sync_tick
368xcall_sync_tick: 466xcall_sync_tick:
369 rdpr %pstate, %g2 467
468661: rdpr %pstate, %g2
370 wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate 469 wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate
470 .section .sun4v_2insn_patch, "ax"
471 .word 661b
472 nop
473 nop
474 .previous
475
371 rdpr %pil, %g2 476 rdpr %pil, %g2
372 wrpr %g0, 15, %pil 477 wrpr %g0, 15, %pil
373 sethi %hi(109f), %g7 478 sethi %hi(109f), %g7
@@ -390,8 +495,15 @@ xcall_sync_tick:
390 */ 495 */
391 .globl xcall_report_regs 496 .globl xcall_report_regs
392xcall_report_regs: 497xcall_report_regs:
393 rdpr %pstate, %g2 498
499661: rdpr %pstate, %g2
394 wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate 500 wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate
501 .section .sun4v_2insn_patch, "ax"
502 .word 661b
503 nop
504 nop
505 .previous
506
395 rdpr %pil, %g2 507 rdpr %pil, %g2
396 wrpr %g0, 15, %pil 508 wrpr %g0, 15, %pil
397 sethi %hi(109f), %g7 509 sethi %hi(109f), %g7
@@ -453,62 +565,96 @@ xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
453 nop 565 nop
454 nop 566 nop
455 567
456 .data 568 /* %g5: error
457 569 * %g6: tlb op
458errata32_hwbug: 570 */
459 .xword 0 571__hypervisor_tlb_xcall_error:
460 572 mov %g5, %g4
461 .text 573 mov %g6, %g5
462 574 ba,pt %xcc, etrap
463 /* These two are not performance critical... */ 575 rd %pc, %g7
464 .globl xcall_flush_tlb_all_spitfire 576 mov %l4, %o0
465xcall_flush_tlb_all_spitfire: 577 call hypervisor_tlbop_error_xcall
466 /* Spitfire Errata #32 workaround. */ 578 mov %l5, %o1
467 sethi %hi(errata32_hwbug), %g4 579 ba,a,pt %xcc, rtrap_clr_l6
468 stx %g0, [%g4 + %lo(errata32_hwbug)] 580
469 581 .globl __hypervisor_xcall_flush_tlb_mm
470 clr %g2 582__hypervisor_xcall_flush_tlb_mm: /* 21 insns */
471 clr %g3 583 /* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
4721: ldxa [%g3] ASI_DTLB_DATA_ACCESS, %g4 584 mov %o0, %g2
473 and %g4, _PAGE_L, %g5 585 mov %o1, %g3
474 brnz,pn %g5, 2f 586 mov %o2, %g4
475 mov TLB_TAG_ACCESS, %g7 587 mov %o3, %g1
476 588 mov %o5, %g7
477 stxa %g0, [%g7] ASI_DMMU 589 clr %o0 /* ARG0: CPU lists unimplemented */
478 membar #Sync 590 clr %o1 /* ARG1: CPU lists unimplemented */
479 stxa %g0, [%g3] ASI_DTLB_DATA_ACCESS 591 mov %g5, %o2 /* ARG2: mmu context */
592 mov HV_MMU_ALL, %o3 /* ARG3: flags */
593 mov HV_FAST_MMU_DEMAP_CTX, %o5
594 ta HV_FAST_TRAP
595 mov HV_FAST_MMU_DEMAP_CTX, %g6
596 brnz,pn %o0, __hypervisor_tlb_xcall_error
597 mov %o0, %g5
598 mov %g2, %o0
599 mov %g3, %o1
600 mov %g4, %o2
601 mov %g1, %o3
602 mov %g7, %o5
480 membar #Sync 603 membar #Sync
604 retry
481 605
482 /* Spitfire Errata #32 workaround. */ 606 .globl __hypervisor_xcall_flush_tlb_pending
483 sethi %hi(errata32_hwbug), %g4 607__hypervisor_xcall_flush_tlb_pending: /* 21 insns */
484 stx %g0, [%g4 + %lo(errata32_hwbug)] 608 /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */
485 609 sllx %g1, 3, %g1
4862: ldxa [%g3] ASI_ITLB_DATA_ACCESS, %g4 610 mov %o0, %g2
487 and %g4, _PAGE_L, %g5 611 mov %o1, %g3
488 brnz,pn %g5, 2f 612 mov %o2, %g4
489 mov TLB_TAG_ACCESS, %g7 6131: sub %g1, (1 << 3), %g1
490 614 ldx [%g7 + %g1], %o0 /* ARG0: virtual address */
491 stxa %g0, [%g7] ASI_IMMU 615 mov %g5, %o1 /* ARG1: mmu context */
492 membar #Sync 616 mov HV_MMU_ALL, %o2 /* ARG2: flags */
493 stxa %g0, [%g3] ASI_ITLB_DATA_ACCESS 617 srlx %o0, PAGE_SHIFT, %o0
618 sllx %o0, PAGE_SHIFT, %o0
619 ta HV_MMU_UNMAP_ADDR_TRAP
620 mov HV_MMU_UNMAP_ADDR_TRAP, %g6
621 brnz,a,pn %o0, __hypervisor_tlb_xcall_error
622 mov %o0, %g5
623 brnz,pt %g1, 1b
624 nop
625 mov %g2, %o0
626 mov %g3, %o1
627 mov %g4, %o2
494 membar #Sync 628 membar #Sync
495
496 /* Spitfire Errata #32 workaround. */
497 sethi %hi(errata32_hwbug), %g4
498 stx %g0, [%g4 + %lo(errata32_hwbug)]
499
5002: add %g2, 1, %g2
501 cmp %g2, SPITFIRE_HIGHEST_LOCKED_TLBENT
502 ble,pt %icc, 1b
503 sll %g2, 3, %g3
504 flush %g6
505 retry 629 retry
506 630
507 .globl xcall_flush_tlb_all_cheetah 631 .globl __hypervisor_xcall_flush_tlb_kernel_range
508xcall_flush_tlb_all_cheetah: 632__hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */
509 mov 0x80, %g2 633 /* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
510 stxa %g0, [%g2] ASI_DMMU_DEMAP 634 sethi %hi(PAGE_SIZE - 1), %g2
511 stxa %g0, [%g2] ASI_IMMU_DEMAP 635 or %g2, %lo(PAGE_SIZE - 1), %g2
636 andn %g1, %g2, %g1
637 andn %g7, %g2, %g7
638 sub %g7, %g1, %g3
639 add %g2, 1, %g2
640 sub %g3, %g2, %g3
641 mov %o0, %g2
642 mov %o1, %g4
643 mov %o2, %g7
6441: add %g1, %g3, %o0 /* ARG0: virtual address */
645 mov 0, %o1 /* ARG1: mmu context */
646 mov HV_MMU_ALL, %o2 /* ARG2: flags */
647 ta HV_MMU_UNMAP_ADDR_TRAP
648 mov HV_MMU_UNMAP_ADDR_TRAP, %g6
649 brnz,pn %o0, __hypervisor_tlb_xcall_error
650 mov %o0, %g5
651 sethi %hi(PAGE_SIZE), %o2
652 brnz,pt %g3, 1b
653 sub %g3, %o2, %g3
654 mov %g2, %o0
655 mov %g4, %o1
656 mov %g7, %o2
657 membar #Sync
512 retry 658 retry
513 659
514 /* These just get rescheduled to PIL vectors. */ 660 /* These just get rescheduled to PIL vectors. */
@@ -527,4 +673,70 @@ xcall_capture:
527 wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint 673 wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint
528 retry 674 retry
529 675
676 .globl xcall_new_mmu_context_version
677xcall_new_mmu_context_version:
678 wr %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
679 retry
680
530#endif /* CONFIG_SMP */ 681#endif /* CONFIG_SMP */
682
683
684 .globl hypervisor_patch_cachetlbops
685hypervisor_patch_cachetlbops:
686 save %sp, -128, %sp
687
688 sethi %hi(__flush_tlb_mm), %o0
689 or %o0, %lo(__flush_tlb_mm), %o0
690 sethi %hi(__hypervisor_flush_tlb_mm), %o1
691 or %o1, %lo(__hypervisor_flush_tlb_mm), %o1
692 call tlb_patch_one
693 mov 10, %o2
694
695 sethi %hi(__flush_tlb_pending), %o0
696 or %o0, %lo(__flush_tlb_pending), %o0
697 sethi %hi(__hypervisor_flush_tlb_pending), %o1
698 or %o1, %lo(__hypervisor_flush_tlb_pending), %o1
699 call tlb_patch_one
700 mov 16, %o2
701
702 sethi %hi(__flush_tlb_kernel_range), %o0
703 or %o0, %lo(__flush_tlb_kernel_range), %o0
704 sethi %hi(__hypervisor_flush_tlb_kernel_range), %o1
705 or %o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
706 call tlb_patch_one
707 mov 16, %o2
708
709#ifdef DCACHE_ALIASING_POSSIBLE
710 sethi %hi(__flush_dcache_page), %o0
711 or %o0, %lo(__flush_dcache_page), %o0
712 sethi %hi(__hypervisor_flush_dcache_page), %o1
713 or %o1, %lo(__hypervisor_flush_dcache_page), %o1
714 call tlb_patch_one
715 mov 2, %o2
716#endif /* DCACHE_ALIASING_POSSIBLE */
717
718#ifdef CONFIG_SMP
719 sethi %hi(xcall_flush_tlb_mm), %o0
720 or %o0, %lo(xcall_flush_tlb_mm), %o0
721 sethi %hi(__hypervisor_xcall_flush_tlb_mm), %o1
722 or %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
723 call tlb_patch_one
724 mov 21, %o2
725
726 sethi %hi(xcall_flush_tlb_pending), %o0
727 or %o0, %lo(xcall_flush_tlb_pending), %o0
728 sethi %hi(__hypervisor_xcall_flush_tlb_pending), %o1
729 or %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1
730 call tlb_patch_one
731 mov 21, %o2
732
733 sethi %hi(xcall_flush_tlb_kernel_range), %o0
734 or %o0, %lo(xcall_flush_tlb_kernel_range), %o0
735 sethi %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
736 or %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
737 call tlb_patch_one
738 mov 25, %o2
739#endif /* CONFIG_SMP */
740
741 ret
742 restore
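The boot-time patching above overwrites the generic flush stubs in place. Roughly, tlb_patch_one behaves like the following C sketch (the function and variable names here are illustrative; the real routine is the assembly in the hunk above):

	static void patch_insns(u32 *dst, const u32 *src, int ninsns)
	{
		int i;

		for (i = 0; i < ninsns; i++) {
			dst[i] = src[i];		/* copy one instruction */
			__asm__ __volatile__("flush %0"	/* flush the I-cache line */
					     : : "r" (dst + i));
		}
	}

cheetah_patch_cachetlbops and hypervisor_patch_cachetlbops pass the instruction counts noted in the comments (19, 27, 11 and 10, 16, 16, 2, 21, 21, 25 respectively) as the third argument.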
diff --git a/arch/sparc64/prom/cif.S b/arch/sparc64/prom/cif.S
index 29d0ae74aed8..5f27ad779c0c 100644
--- a/arch/sparc64/prom/cif.S
+++ b/arch/sparc64/prom/cif.S
@@ -1,10 +1,12 @@
1/* cif.S: PROM entry/exit assembler trampolines. 1/* cif.S: PROM entry/exit assembler trampolines.
2 * 2 *
3 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) 3 * Copyright (C) 1996, 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
4 * Copyright (C) 2005 David S. Miller <davem@davemloft.net> 4 * Copyright (C) 2005, 2006 David S. Miller <davem@davemloft.net>
5 */ 5 */
6 6
7#include <asm/pstate.h> 7#include <asm/pstate.h>
8#include <asm/cpudata.h>
9#include <asm/thread_info.h>
8 10
9 .text 11 .text
10 .globl prom_cif_interface 12 .globl prom_cif_interface
@@ -12,78 +14,16 @@ prom_cif_interface:
12 sethi %hi(p1275buf), %o0 14 sethi %hi(p1275buf), %o0
13 or %o0, %lo(p1275buf), %o0 15 or %o0, %lo(p1275buf), %o0
14 ldx [%o0 + 0x010], %o1 ! prom_cif_stack 16 ldx [%o0 + 0x010], %o1 ! prom_cif_stack
15 save %o1, -0x190, %sp 17 save %o1, -192, %sp
16 ldx [%i0 + 0x008], %l2 ! prom_cif_handler 18 ldx [%i0 + 0x008], %l2 ! prom_cif_handler
17 rdpr %pstate, %l4 19 mov %g4, %l0
18 wrpr %g0, 0x15, %pstate ! save alternate globals 20 mov %g5, %l1
19 stx %g1, [%sp + 2047 + 0x0b0] 21 mov %g6, %l3
20 stx %g2, [%sp + 2047 + 0x0b8]
21 stx %g3, [%sp + 2047 + 0x0c0]
22 stx %g4, [%sp + 2047 + 0x0c8]
23 stx %g5, [%sp + 2047 + 0x0d0]
24 stx %g6, [%sp + 2047 + 0x0d8]
25 stx %g7, [%sp + 2047 + 0x0e0]
26 wrpr %g0, 0x814, %pstate ! save interrupt globals
27 stx %g1, [%sp + 2047 + 0x0e8]
28 stx %g2, [%sp + 2047 + 0x0f0]
29 stx %g3, [%sp + 2047 + 0x0f8]
30 stx %g4, [%sp + 2047 + 0x100]
31 stx %g5, [%sp + 2047 + 0x108]
32 stx %g6, [%sp + 2047 + 0x110]
33 stx %g7, [%sp + 2047 + 0x118]
34 wrpr %g0, 0x14, %pstate ! save normal globals
35 stx %g1, [%sp + 2047 + 0x120]
36 stx %g2, [%sp + 2047 + 0x128]
37 stx %g3, [%sp + 2047 + 0x130]
38 stx %g4, [%sp + 2047 + 0x138]
39 stx %g5, [%sp + 2047 + 0x140]
40 stx %g6, [%sp + 2047 + 0x148]
41 stx %g7, [%sp + 2047 + 0x150]
42 wrpr %g0, 0x414, %pstate ! save mmu globals
43 stx %g1, [%sp + 2047 + 0x158]
44 stx %g2, [%sp + 2047 + 0x160]
45 stx %g3, [%sp + 2047 + 0x168]
46 stx %g4, [%sp + 2047 + 0x170]
47 stx %g5, [%sp + 2047 + 0x178]
48 stx %g6, [%sp + 2047 + 0x180]
49 stx %g7, [%sp + 2047 + 0x188]
50 mov %g1, %l0 ! also save to locals, so we can handle
51 mov %g2, %l1 ! tlb faults later on, when accessing
52 mov %g3, %l3 ! the stack.
53 mov %g7, %l5
54 wrpr %l4, PSTATE_IE, %pstate ! turn off interrupts
55 call %l2 22 call %l2
56 add %i0, 0x018, %o0 ! prom_args 23 add %i0, 0x018, %o0 ! prom_args
57 wrpr %g0, 0x414, %pstate ! restore mmu globals 24 mov %l0, %g4
58 mov %l0, %g1 25 mov %l1, %g5
59 mov %l1, %g2 26 mov %l3, %g6
60 mov %l3, %g3
61 mov %l5, %g7
62 wrpr %g0, 0x14, %pstate ! restore normal globals
63 ldx [%sp + 2047 + 0x120], %g1
64 ldx [%sp + 2047 + 0x128], %g2
65 ldx [%sp + 2047 + 0x130], %g3
66 ldx [%sp + 2047 + 0x138], %g4
67 ldx [%sp + 2047 + 0x140], %g5
68 ldx [%sp + 2047 + 0x148], %g6
69 ldx [%sp + 2047 + 0x150], %g7
70 wrpr %g0, 0x814, %pstate ! restore interrupt globals
71 ldx [%sp + 2047 + 0x0e8], %g1
72 ldx [%sp + 2047 + 0x0f0], %g2
73 ldx [%sp + 2047 + 0x0f8], %g3
74 ldx [%sp + 2047 + 0x100], %g4
75 ldx [%sp + 2047 + 0x108], %g5
76 ldx [%sp + 2047 + 0x110], %g6
77 ldx [%sp + 2047 + 0x118], %g7
78 wrpr %g0, 0x15, %pstate ! restore alternate globals
79 ldx [%sp + 2047 + 0x0b0], %g1
80 ldx [%sp + 2047 + 0x0b8], %g2
81 ldx [%sp + 2047 + 0x0c0], %g3
82 ldx [%sp + 2047 + 0x0c8], %g4
83 ldx [%sp + 2047 + 0x0d0], %g5
84 ldx [%sp + 2047 + 0x0d8], %g6
85 ldx [%sp + 2047 + 0x0e0], %g7
86 wrpr %l4, 0, %pstate ! restore original pstate
87 ret 27 ret
88 restore 28 restore
89 29
@@ -91,135 +31,18 @@ prom_cif_interface:
91prom_cif_callback: 31prom_cif_callback:
92 sethi %hi(p1275buf), %o1 32 sethi %hi(p1275buf), %o1
93 or %o1, %lo(p1275buf), %o1 33 or %o1, %lo(p1275buf), %o1
94 save %sp, -0x270, %sp 34 save %sp, -192, %sp
95 rdpr %pstate, %l4 35 TRAP_LOAD_THREAD_REG(%g6, %g1)
96 wrpr %g0, 0x15, %pstate ! save PROM alternate globals 36 LOAD_PER_CPU_BASE(%g5, %g6, %g4, %g3, %o0)
97 stx %g1, [%sp + 2047 + 0x0b0] 37 ldx [%g6 + TI_TASK], %g4
98 stx %g2, [%sp + 2047 + 0x0b8]
99 stx %g3, [%sp + 2047 + 0x0c0]
100 stx %g4, [%sp + 2047 + 0x0c8]
101 stx %g5, [%sp + 2047 + 0x0d0]
102 stx %g6, [%sp + 2047 + 0x0d8]
103 stx %g7, [%sp + 2047 + 0x0e0]
104 ! restore Linux alternate globals
105 ldx [%sp + 2047 + 0x190], %g1
106 ldx [%sp + 2047 + 0x198], %g2
107 ldx [%sp + 2047 + 0x1a0], %g3
108 ldx [%sp + 2047 + 0x1a8], %g4
109 ldx [%sp + 2047 + 0x1b0], %g5
110 ldx [%sp + 2047 + 0x1b8], %g6
111 ldx [%sp + 2047 + 0x1c0], %g7
112 wrpr %g0, 0x814, %pstate ! save PROM interrupt globals
113 stx %g1, [%sp + 2047 + 0x0e8]
114 stx %g2, [%sp + 2047 + 0x0f0]
115 stx %g3, [%sp + 2047 + 0x0f8]
116 stx %g4, [%sp + 2047 + 0x100]
117 stx %g5, [%sp + 2047 + 0x108]
118 stx %g6, [%sp + 2047 + 0x110]
119 stx %g7, [%sp + 2047 + 0x118]
120 ! restore Linux interrupt globals
121 ldx [%sp + 2047 + 0x1c8], %g1
122 ldx [%sp + 2047 + 0x1d0], %g2
123 ldx [%sp + 2047 + 0x1d8], %g3
124 ldx [%sp + 2047 + 0x1e0], %g4
125 ldx [%sp + 2047 + 0x1e8], %g5
126 ldx [%sp + 2047 + 0x1f0], %g6
127 ldx [%sp + 2047 + 0x1f8], %g7
128 wrpr %g0, 0x14, %pstate ! save PROM normal globals
129 stx %g1, [%sp + 2047 + 0x120]
130 stx %g2, [%sp + 2047 + 0x128]
131 stx %g3, [%sp + 2047 + 0x130]
132 stx %g4, [%sp + 2047 + 0x138]
133 stx %g5, [%sp + 2047 + 0x140]
134 stx %g6, [%sp + 2047 + 0x148]
135 stx %g7, [%sp + 2047 + 0x150]
136 ! restore Linux normal globals
137 ldx [%sp + 2047 + 0x200], %g1
138 ldx [%sp + 2047 + 0x208], %g2
139 ldx [%sp + 2047 + 0x210], %g3
140 ldx [%sp + 2047 + 0x218], %g4
141 ldx [%sp + 2047 + 0x220], %g5
142 ldx [%sp + 2047 + 0x228], %g6
143 ldx [%sp + 2047 + 0x230], %g7
144 wrpr %g0, 0x414, %pstate ! save PROM mmu globals
145 stx %g1, [%sp + 2047 + 0x158]
146 stx %g2, [%sp + 2047 + 0x160]
147 stx %g3, [%sp + 2047 + 0x168]
148 stx %g4, [%sp + 2047 + 0x170]
149 stx %g5, [%sp + 2047 + 0x178]
150 stx %g6, [%sp + 2047 + 0x180]
151 stx %g7, [%sp + 2047 + 0x188]
152 ! restore Linux mmu globals
153 ldx [%sp + 2047 + 0x238], %o0
154 ldx [%sp + 2047 + 0x240], %o1
155 ldx [%sp + 2047 + 0x248], %l2
156 ldx [%sp + 2047 + 0x250], %l3
157 ldx [%sp + 2047 + 0x258], %l5
158 ldx [%sp + 2047 + 0x260], %l6
159 ldx [%sp + 2047 + 0x268], %l7
160 ! switch to Linux tba
161 sethi %hi(sparc64_ttable_tl0), %l1
162 rdpr %tba, %l0 ! save PROM tba
163 mov %o0, %g1
164 mov %o1, %g2
165 mov %l2, %g3
166 mov %l3, %g4
167 mov %l5, %g5
168 mov %l6, %g6
169 mov %l7, %g7
170 wrpr %l1, %tba ! install Linux tba
171 wrpr %l4, 0, %pstate ! restore PSTATE
172 call prom_world 38 call prom_world
173 mov %g0, %o0 39 mov 0, %o0
174 ldx [%i1 + 0x000], %l2 40 ldx [%i1 + 0x000], %l2
175 call %l2 41 call %l2
176 mov %i0, %o0 42 mov %i0, %o0
177 mov %o0, %l1 43 mov %o0, %l1
178 call prom_world 44 call prom_world
179 or %g0, 1, %o0 45 mov 1, %o0
180 wrpr %g0, 0x14, %pstate ! interrupts off
181 ! restore PROM mmu globals
182 ldx [%sp + 2047 + 0x158], %o0
183 ldx [%sp + 2047 + 0x160], %o1
184 ldx [%sp + 2047 + 0x168], %l2
185 ldx [%sp + 2047 + 0x170], %l3
186 ldx [%sp + 2047 + 0x178], %l5
187 ldx [%sp + 2047 + 0x180], %l6
188 ldx [%sp + 2047 + 0x188], %l7
189 wrpr %g0, 0x414, %pstate ! restore PROM mmu globals
190 mov %o0, %g1
191 mov %o1, %g2
192 mov %l2, %g3
193 mov %l3, %g4
194 mov %l5, %g5
195 mov %l6, %g6
196 mov %l7, %g7
197 wrpr %l0, %tba ! restore PROM tba
198 wrpr %g0, 0x14, %pstate ! restore PROM normal globals
199 ldx [%sp + 2047 + 0x120], %g1
200 ldx [%sp + 2047 + 0x128], %g2
201 ldx [%sp + 2047 + 0x130], %g3
202 ldx [%sp + 2047 + 0x138], %g4
203 ldx [%sp + 2047 + 0x140], %g5
204 ldx [%sp + 2047 + 0x148], %g6
205 ldx [%sp + 2047 + 0x150], %g7
206 wrpr %g0, 0x814, %pstate ! restore PROM interrupt globals
207 ldx [%sp + 2047 + 0x0e8], %g1
208 ldx [%sp + 2047 + 0x0f0], %g2
209 ldx [%sp + 2047 + 0x0f8], %g3
210 ldx [%sp + 2047 + 0x100], %g4
211 ldx [%sp + 2047 + 0x108], %g5
212 ldx [%sp + 2047 + 0x110], %g6
213 ldx [%sp + 2047 + 0x118], %g7
214 wrpr %g0, 0x15, %pstate ! restore PROM alternate globals
215 ldx [%sp + 2047 + 0x0b0], %g1
216 ldx [%sp + 2047 + 0x0b8], %g2
217 ldx [%sp + 2047 + 0x0c0], %g3
218 ldx [%sp + 2047 + 0x0c8], %g4
219 ldx [%sp + 2047 + 0x0d0], %g5
220 ldx [%sp + 2047 + 0x0d8], %g6
221 ldx [%sp + 2047 + 0x0e0], %g7
222 wrpr %l4, 0, %pstate
223 ret 46 ret
224 restore %l1, 0, %o0 47 restore %l1, 0, %o0
225 48
diff --git a/arch/sparc64/prom/console.c b/arch/sparc64/prom/console.c
index ac6d035dd150..7c25c54cefdc 100644
--- a/arch/sparc64/prom/console.c
+++ b/arch/sparc64/prom/console.c
@@ -102,6 +102,9 @@ prom_query_input_device(void)
102 if (!strncmp (propb, "rsc", 3)) 102 if (!strncmp (propb, "rsc", 3))
103 return PROMDEV_IRSC; 103 return PROMDEV_IRSC;
104 104
105 if (!strncmp (propb, "virtual-console", 3))
106 return PROMDEV_IVCONS;
107
105 if (strncmp (propb, "tty", 3) || !propb[3]) 108 if (strncmp (propb, "tty", 3) || !propb[3])
106 return PROMDEV_I_UNK; 109 return PROMDEV_I_UNK;
107 110
@@ -143,6 +146,9 @@ prom_query_output_device(void)
143 if (!strncmp (propb, "rsc", 3)) 146 if (!strncmp (propb, "rsc", 3))
144 return PROMDEV_ORSC; 147 return PROMDEV_ORSC;
145 148
149 if (!strncmp (propb, "virtual-console", 3))
150 return PROMDEV_OVCONS;
151
146 if (strncmp (propb, "tty", 3) || !propb[3]) 152 if (strncmp (propb, "tty", 3) || !propb[3])
147 return PROMDEV_O_UNK; 153 return PROMDEV_O_UNK;
148 154
diff --git a/arch/sparc64/prom/init.c b/arch/sparc64/prom/init.c
index f3cc2d8578b2..1c0db842a6f4 100644
--- a/arch/sparc64/prom/init.c
+++ b/arch/sparc64/prom/init.c
@@ -14,11 +14,10 @@
14#include <asm/openprom.h> 14#include <asm/openprom.h>
15#include <asm/oplib.h> 15#include <asm/oplib.h>
16 16
17enum prom_major_version prom_vers; 17/* OBP version string. */
18unsigned int prom_rev, prom_prev; 18char prom_version[80];
19 19
20/* The root node of the prom device tree. */ 20/* The root node of the prom device tree. */
21int prom_root_node;
22int prom_stdin, prom_stdout; 21int prom_stdin, prom_stdout;
23int prom_chosen_node; 22int prom_chosen_node;
24 23
@@ -31,68 +30,25 @@ extern void prom_cif_init(void *, void *);
31 30
32void __init prom_init(void *cif_handler, void *cif_stack) 31void __init prom_init(void *cif_handler, void *cif_stack)
33{ 32{
34 char buffer[80], *p;
35 int ints[3];
36 int node; 33 int node;
37 int i = 0;
38 int bufadjust;
39
40 prom_vers = PROM_P1275;
41 34
42 prom_cif_init(cif_handler, cif_stack); 35 prom_cif_init(cif_handler, cif_stack);
43 36
44 prom_root_node = prom_getsibling(0);
45 if((prom_root_node == 0) || (prom_root_node == -1))
46 prom_halt();
47
48 prom_chosen_node = prom_finddevice(prom_chosen_path); 37 prom_chosen_node = prom_finddevice(prom_chosen_path);
49 if (!prom_chosen_node || prom_chosen_node == -1) 38 if (!prom_chosen_node || prom_chosen_node == -1)
50 prom_halt(); 39 prom_halt();
51 40
52 prom_stdin = prom_getint (prom_chosen_node, "stdin"); 41 prom_stdin = prom_getint(prom_chosen_node, "stdin");
53 prom_stdout = prom_getint (prom_chosen_node, "stdout"); 42 prom_stdout = prom_getint(prom_chosen_node, "stdout");
54 43
55 node = prom_finddevice("/openprom"); 44 node = prom_finddevice("/openprom");
56 if (!node || node == -1) 45 if (!node || node == -1)
57 prom_halt(); 46 prom_halt();
58 47
59 prom_getstring (node, "version", buffer, sizeof (buffer)); 48 prom_getstring(node, "version", prom_version, sizeof(prom_version));
60
61 prom_printf ("\n");
62
63 if (strncmp (buffer, "OBP ", 4))
64 goto strange_version;
65
66 /*
67 * Version field is expected to be 'OBP xx.yy.zz date...'
68 * However, Sun can't stick to this format very well, so
69 * we need to check for 'OBP xx.yy.zz date...' and adjust
70 * accordingly. -spot
71 */
72
73 if (strncmp (buffer, "OBP  ", 5))
74 bufadjust = 4;
75 else
76 bufadjust = 5;
77
78 p = buffer + bufadjust;
79 while (p && isdigit(*p) && i < 3) {
80 ints[i++] = simple_strtoul(p, NULL, 0);
81 if ((p = strchr(p, '.')) != NULL)
82 p++;
83 }
84 if (i != 3)
85 goto strange_version;
86
87 prom_rev = ints[1];
88 prom_prev = (ints[0] << 16) | (ints[1] << 8) | ints[2];
89
90 printk ("PROMLIB: Sun IEEE Boot Prom %s\n", buffer + bufadjust);
91 49
92 /* Initialization successful. */ 50 prom_printf("\n");
93 return;
94 51
95strange_version: 52 printk("PROMLIB: Sun IEEE Boot Prom '%s'\n", prom_version);
96 prom_printf ("Strange OBP version `%s'.\n", buffer); 53 printk("PROMLIB: Root node compatible: %s\n", prom_root_compatible);
97 prom_halt ();
98} 54}
diff --git a/arch/sparc64/prom/misc.c b/arch/sparc64/prom/misc.c
index 87f5cfce23bb..577bde8b6647 100644
--- a/arch/sparc64/prom/misc.c
+++ b/arch/sparc64/prom/misc.c
@@ -112,28 +112,20 @@ unsigned char prom_get_idprom(char *idbuf, int num_bytes)
112 return 0xff; 112 return 0xff;
113} 113}
114 114
115/* Get the major prom version number. */ 115/* Install Linux trap table so PROM uses that instead of its own. */
116int prom_version(void) 116void prom_set_trap_table(unsigned long tba)
117{
118 return PROM_P1275;
119}
120
121/* Get the prom plugin-revision. */
122int prom_getrev(void)
123{
124 return prom_rev;
125}
126
127/* Get the prom firmware print revision. */
128int prom_getprev(void)
129{ 117{
130 return prom_prev; 118 p1275_cmd("SUNW,set-trap-table",
119 (P1275_ARG(0, P1275_ARG_IN_64B) |
120 P1275_INOUT(1, 0)), tba);
131} 121}
132 122
133/* Install Linux trap table so PROM uses that instead of its own. */ 123void prom_set_trap_table_sun4v(unsigned long tba, unsigned long mmfsa)
134void prom_set_trap_table(unsigned long tba)
135{ 124{
136 p1275_cmd("SUNW,set-trap-table", P1275_INOUT(1, 0), tba); 125 p1275_cmd("SUNW,set-trap-table",
126 (P1275_ARG(0, P1275_ARG_IN_64B) |
127 P1275_ARG(1, P1275_ARG_IN_64B) |
128 P1275_INOUT(2, 0)), tba, mmfsa);
137} 129}
138 130
139int prom_get_mmu_ihandle(void) 131int prom_get_mmu_ihandle(void)
@@ -303,9 +295,21 @@ int prom_wakeupsystem(void)
303} 295}
304 296
305#ifdef CONFIG_SMP 297#ifdef CONFIG_SMP
306void prom_startcpu(int cpunode, unsigned long pc, unsigned long o0) 298void prom_startcpu(int cpunode, unsigned long pc, unsigned long arg)
299{
300 p1275_cmd("SUNW,start-cpu", P1275_INOUT(3, 0), cpunode, pc, arg);
301}
302
303void prom_startcpu_cpuid(int cpuid, unsigned long pc, unsigned long arg)
304{
305 p1275_cmd("SUNW,start-cpu-by-cpuid", P1275_INOUT(3, 0),
306 cpuid, pc, arg);
307}
308
309void prom_stopcpu_cpuid(int cpuid)
307{ 310{
308 p1275_cmd("SUNW,start-cpu", P1275_INOUT(3, 0), cpunode, pc, o0); 311 p1275_cmd("SUNW,stop-cpu-by-cpuid", P1275_INOUT(1, 0),
312 cpuid);
309} 313}
310 314
311void prom_stopself(void) 315void prom_stopself(void)
diff --git a/arch/sparc64/prom/p1275.c b/arch/sparc64/prom/p1275.c
index a5a7c5712028..2b32c489860c 100644
--- a/arch/sparc64/prom/p1275.c
+++ b/arch/sparc64/prom/p1275.c
@@ -30,16 +30,6 @@ extern void prom_world(int);
30extern void prom_cif_interface(void); 30extern void prom_cif_interface(void);
31extern void prom_cif_callback(void); 31extern void prom_cif_callback(void);
32 32
33static inline unsigned long spitfire_get_primary_context(void)
34{
35 unsigned long ctx;
36
37 __asm__ __volatile__("ldxa [%1] %2, %0"
38 : "=r" (ctx)
39 : "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
40 return ctx;
41}
42
43/* 33/*
44 * This provides SMP safety on the p1275buf. prom_callback() drops this lock 34 * This provides SMP safety on the p1275buf. prom_callback() drops this lock
45 * to allow recursive acquisition. 35 * to allow recursive acquisition.
@@ -55,7 +45,6 @@ long p1275_cmd(const char *service, long fmt, ...)
55 long attrs, x; 45 long attrs, x;
56 46
57 p = p1275buf.prom_buffer; 47 p = p1275buf.prom_buffer;
58 BUG_ON((spitfire_get_primary_context() & CTX_NR_MASK) != 0);
59 48
60 spin_lock_irqsave(&prom_entry_lock, flags); 49 spin_lock_irqsave(&prom_entry_lock, flags);
61 50
diff --git a/arch/sparc64/prom/tree.c b/arch/sparc64/prom/tree.c
index b1ff9e87dcc6..49075abd7cbc 100644
--- a/arch/sparc64/prom/tree.c
+++ b/arch/sparc64/prom/tree.c
@@ -51,7 +51,7 @@ prom_getparent(int node)
51__inline__ int 51__inline__ int
52__prom_getsibling(int node) 52__prom_getsibling(int node)
53{ 53{
54 return p1275_cmd ("peer", P1275_INOUT(1, 1), node); 54 return p1275_cmd(prom_peer_name, P1275_INOUT(1, 1), node);
55} 55}
56 56
57__inline__ int 57__inline__ int
@@ -59,9 +59,12 @@ prom_getsibling(int node)
59{ 59{
60 int sibnode; 60 int sibnode;
61 61
62 if(node == -1) return 0; 62 if (node == -1)
63 return 0;
63 sibnode = __prom_getsibling(node); 64 sibnode = __prom_getsibling(node);
64 if(sibnode == -1) return 0; 65 if (sibnode == -1)
66 return 0;
67
65 return sibnode; 68 return sibnode;
66} 69}
67 70
diff --git a/arch/sparc64/solaris/misc.c b/arch/sparc64/solaris/misc.c
index 3ab4677395f2..5284996780a7 100644
--- a/arch/sparc64/solaris/misc.c
+++ b/arch/sparc64/solaris/misc.c
@@ -90,7 +90,7 @@ static u32 do_solaris_mmap(u32 addr, u32 len, u32 prot, u32 flags, u32 fd, u64 o
90 len = PAGE_ALIGN(len); 90 len = PAGE_ALIGN(len);
91 if(!(flags & MAP_FIXED)) 91 if(!(flags & MAP_FIXED))
92 addr = 0; 92 addr = 0;
93 else if (len > 0xf0000000UL || addr > 0xf0000000UL - len) 93 else if (len > STACK_TOP32 || addr > STACK_TOP32 - len)
94 goto out_putf; 94 goto out_putf;
95 ret_type = flags & _MAP_NEW; 95 ret_type = flags & _MAP_NEW;
96 flags &= ~_MAP_NEW; 96 flags &= ~_MAP_NEW;
@@ -102,7 +102,7 @@ static u32 do_solaris_mmap(u32 addr, u32 len, u32 prot, u32 flags, u32 fd, u64 o
102 (unsigned long) prot, (unsigned long) flags, off); 102 (unsigned long) prot, (unsigned long) flags, off);
103 up_write(&current->mm->mmap_sem); 103 up_write(&current->mm->mmap_sem);
104 if(!ret_type) 104 if(!ret_type)
105 retval = ((retval < 0xf0000000) ? 0 : retval); 105 retval = ((retval < STACK_TOP32) ? 0 : retval);
106 106
107out_putf: 107out_putf:
108 if (file) 108 if (file)
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 8da3cf66894c..296708ceceb2 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -182,6 +182,9 @@ struct as_rq {
182 182
183static kmem_cache_t *arq_pool; 183static kmem_cache_t *arq_pool;
184 184
185static atomic_t ioc_count = ATOMIC_INIT(0);
186static struct completion *ioc_gone;
187
185static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq); 188static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq);
186static void as_antic_stop(struct as_data *ad); 189static void as_antic_stop(struct as_data *ad);
187 190
@@ -193,6 +196,15 @@ static void as_antic_stop(struct as_data *ad);
193static void free_as_io_context(struct as_io_context *aic) 196static void free_as_io_context(struct as_io_context *aic)
194{ 197{
195 kfree(aic); 198 kfree(aic);
199 if (atomic_dec_and_test(&ioc_count) && ioc_gone)
200 complete(ioc_gone);
201}
202
203static void as_trim(struct io_context *ioc)
204{
205 if (ioc->aic)
206 free_as_io_context(ioc->aic);
207 ioc->aic = NULL;
196} 208}
197 209
198/* Called when the task exits */ 210/* Called when the task exits */
@@ -220,6 +232,7 @@ static struct as_io_context *alloc_as_io_context(void)
220 ret->seek_total = 0; 232 ret->seek_total = 0;
221 ret->seek_samples = 0; 233 ret->seek_samples = 0;
222 ret->seek_mean = 0; 234 ret->seek_mean = 0;
235 atomic_inc(&ioc_count);
223 } 236 }
224 237
225 return ret; 238 return ret;
@@ -1696,11 +1709,6 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
1696/* 1709/*
1697 * sysfs parts below 1710 * sysfs parts below
1698 */ 1711 */
1699struct as_fs_entry {
1700 struct attribute attr;
1701 ssize_t (*show)(struct as_data *, char *);
1702 ssize_t (*store)(struct as_data *, const char *, size_t);
1703};
1704 1712
1705static ssize_t 1713static ssize_t
1706as_var_show(unsigned int var, char *page) 1714as_var_show(unsigned int var, char *page)
@@ -1717,8 +1725,9 @@ as_var_store(unsigned long *var, const char *page, size_t count)
1717 return count; 1725 return count;
1718} 1726}
1719 1727
1720static ssize_t as_est_show(struct as_data *ad, char *page) 1728static ssize_t est_time_show(elevator_t *e, char *page)
1721{ 1729{
1730 struct as_data *ad = e->elevator_data;
1722 int pos = 0; 1731 int pos = 0;
1723 1732
1724 pos += sprintf(page+pos, "%lu %% exit probability\n", 1733 pos += sprintf(page+pos, "%lu %% exit probability\n",
@@ -1734,21 +1743,23 @@ static ssize_t as_est_show(struct as_data *ad, char *page)
1734} 1743}
1735 1744
1736#define SHOW_FUNCTION(__FUNC, __VAR) \ 1745#define SHOW_FUNCTION(__FUNC, __VAR) \
1737static ssize_t __FUNC(struct as_data *ad, char *page) \ 1746static ssize_t __FUNC(elevator_t *e, char *page) \
1738{ \ 1747{ \
1748 struct as_data *ad = e->elevator_data; \
1739 return as_var_show(jiffies_to_msecs((__VAR)), (page)); \ 1749 return as_var_show(jiffies_to_msecs((__VAR)), (page)); \
1740} 1750}
1741SHOW_FUNCTION(as_readexpire_show, ad->fifo_expire[REQ_SYNC]); 1751SHOW_FUNCTION(as_read_expire_show, ad->fifo_expire[REQ_SYNC]);
1742SHOW_FUNCTION(as_writeexpire_show, ad->fifo_expire[REQ_ASYNC]); 1752SHOW_FUNCTION(as_write_expire_show, ad->fifo_expire[REQ_ASYNC]);
1743SHOW_FUNCTION(as_anticexpire_show, ad->antic_expire); 1753SHOW_FUNCTION(as_antic_expire_show, ad->antic_expire);
1744SHOW_FUNCTION(as_read_batchexpire_show, ad->batch_expire[REQ_SYNC]); 1754SHOW_FUNCTION(as_read_batch_expire_show, ad->batch_expire[REQ_SYNC]);
1745SHOW_FUNCTION(as_write_batchexpire_show, ad->batch_expire[REQ_ASYNC]); 1755SHOW_FUNCTION(as_write_batch_expire_show, ad->batch_expire[REQ_ASYNC]);
1746#undef SHOW_FUNCTION 1756#undef SHOW_FUNCTION
1747 1757
1748#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \ 1758#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
1749static ssize_t __FUNC(struct as_data *ad, const char *page, size_t count) \ 1759static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \
1750{ \ 1760{ \
1751 int ret = as_var_store(__PTR, (page), count); \ 1761 struct as_data *ad = e->elevator_data; \
1762 int ret = as_var_store(__PTR, (page), count); \
1752 if (*(__PTR) < (MIN)) \ 1763 if (*(__PTR) < (MIN)) \
1753 *(__PTR) = (MIN); \ 1764 *(__PTR) = (MIN); \
1754 else if (*(__PTR) > (MAX)) \ 1765 else if (*(__PTR) > (MAX)) \
@@ -1756,90 +1767,26 @@ static ssize_t __FUNC(struct as_data *ad, const char *page, size_t count) \
1756 *(__PTR) = msecs_to_jiffies(*(__PTR)); \ 1767 *(__PTR) = msecs_to_jiffies(*(__PTR)); \
1757 return ret; \ 1768 return ret; \
1758} 1769}
1759STORE_FUNCTION(as_readexpire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX); 1770STORE_FUNCTION(as_read_expire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX);
1760STORE_FUNCTION(as_writeexpire_store, &ad->fifo_expire[REQ_ASYNC], 0, INT_MAX); 1771STORE_FUNCTION(as_write_expire_store, &ad->fifo_expire[REQ_ASYNC], 0, INT_MAX);
1761STORE_FUNCTION(as_anticexpire_store, &ad->antic_expire, 0, INT_MAX); 1772STORE_FUNCTION(as_antic_expire_store, &ad->antic_expire, 0, INT_MAX);
1762STORE_FUNCTION(as_read_batchexpire_store, 1773STORE_FUNCTION(as_read_batch_expire_store,
1763 &ad->batch_expire[REQ_SYNC], 0, INT_MAX); 1774 &ad->batch_expire[REQ_SYNC], 0, INT_MAX);
1764STORE_FUNCTION(as_write_batchexpire_store, 1775STORE_FUNCTION(as_write_batch_expire_store,
1765 &ad->batch_expire[REQ_ASYNC], 0, INT_MAX); 1776 &ad->batch_expire[REQ_ASYNC], 0, INT_MAX);
1766#undef STORE_FUNCTION 1777#undef STORE_FUNCTION
1767 1778
1768static struct as_fs_entry as_est_entry = { 1779#define AS_ATTR(name) \
1769 .attr = {.name = "est_time", .mode = S_IRUGO }, 1780 __ATTR(name, S_IRUGO|S_IWUSR, as_##name##_show, as_##name##_store)
1770 .show = as_est_show, 1781
1771}; 1782static struct elv_fs_entry as_attrs[] = {
1772static struct as_fs_entry as_readexpire_entry = { 1783 __ATTR_RO(est_time),
1773 .attr = {.name = "read_expire", .mode = S_IRUGO | S_IWUSR }, 1784 AS_ATTR(read_expire),
1774 .show = as_readexpire_show, 1785 AS_ATTR(write_expire),
1775 .store = as_readexpire_store, 1786 AS_ATTR(antic_expire),
1776}; 1787 AS_ATTR(read_batch_expire),
1777static struct as_fs_entry as_writeexpire_entry = { 1788 AS_ATTR(write_batch_expire),
1778 .attr = {.name = "write_expire", .mode = S_IRUGO | S_IWUSR }, 1789 __ATTR_NULL
1779 .show = as_writeexpire_show,
1780 .store = as_writeexpire_store,
1781};
1782static struct as_fs_entry as_anticexpire_entry = {
1783 .attr = {.name = "antic_expire", .mode = S_IRUGO | S_IWUSR },
1784 .show = as_anticexpire_show,
1785 .store = as_anticexpire_store,
1786};
1787static struct as_fs_entry as_read_batchexpire_entry = {
1788 .attr = {.name = "read_batch_expire", .mode = S_IRUGO | S_IWUSR },
1789 .show = as_read_batchexpire_show,
1790 .store = as_read_batchexpire_store,
1791};
1792static struct as_fs_entry as_write_batchexpire_entry = {
1793 .attr = {.name = "write_batch_expire", .mode = S_IRUGO | S_IWUSR },
1794 .show = as_write_batchexpire_show,
1795 .store = as_write_batchexpire_store,
1796};
1797
1798static struct attribute *default_attrs[] = {
1799 &as_est_entry.attr,
1800 &as_readexpire_entry.attr,
1801 &as_writeexpire_entry.attr,
1802 &as_anticexpire_entry.attr,
1803 &as_read_batchexpire_entry.attr,
1804 &as_write_batchexpire_entry.attr,
1805 NULL,
1806};
1807
1808#define to_as(atr) container_of((atr), struct as_fs_entry, attr)
1809
1810static ssize_t
1811as_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
1812{
1813 elevator_t *e = container_of(kobj, elevator_t, kobj);
1814 struct as_fs_entry *entry = to_as(attr);
1815
1816 if (!entry->show)
1817 return -EIO;
1818
1819 return entry->show(e->elevator_data, page);
1820}
1821
1822static ssize_t
1823as_attr_store(struct kobject *kobj, struct attribute *attr,
1824 const char *page, size_t length)
1825{
1826 elevator_t *e = container_of(kobj, elevator_t, kobj);
1827 struct as_fs_entry *entry = to_as(attr);
1828
1829 if (!entry->store)
1830 return -EIO;
1831
1832 return entry->store(e->elevator_data, page, length);
1833}
1834
1835static struct sysfs_ops as_sysfs_ops = {
1836 .show = as_attr_show,
1837 .store = as_attr_store,
1838};
1839
1840static struct kobj_type as_ktype = {
1841 .sysfs_ops = &as_sysfs_ops,
1842 .default_attrs = default_attrs,
1843}; 1790};
1844 1791
1845static struct elevator_type iosched_as = { 1792static struct elevator_type iosched_as = {
@@ -1860,9 +1807,10 @@ static struct elevator_type iosched_as = {
1860 .elevator_may_queue_fn = as_may_queue, 1807 .elevator_may_queue_fn = as_may_queue,
1861 .elevator_init_fn = as_init_queue, 1808 .elevator_init_fn = as_init_queue,
1862 .elevator_exit_fn = as_exit_queue, 1809 .elevator_exit_fn = as_exit_queue,
1810 .trim = as_trim,
1863 }, 1811 },
1864 1812
1865 .elevator_ktype = &as_ktype, 1813 .elevator_attrs = as_attrs,
1866 .elevator_name = "anticipatory", 1814 .elevator_name = "anticipatory",
1867 .elevator_owner = THIS_MODULE, 1815 .elevator_owner = THIS_MODULE,
1868}; 1816};
@@ -1893,7 +1841,13 @@ static int __init as_init(void)
1893 1841
1894static void __exit as_exit(void) 1842static void __exit as_exit(void)
1895{ 1843{
1844 DECLARE_COMPLETION(all_gone);
1896 elv_unregister(&iosched_as); 1845 elv_unregister(&iosched_as);
1846 ioc_gone = &all_gone;
1847 barrier();
1848 if (atomic_read(&ioc_count))
1849 complete(ioc_gone);
1850 synchronize_rcu();
1897 kmem_cache_destroy(arq_pool); 1851 kmem_cache_destroy(arq_pool);
1898} 1852}
1899 1853
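
The as-iosched sysfs conversion above collapses the hand-rolled as_fs_entry definitions and the kobj_type glue into one attribute table built by a token-pasting macro and terminated by a sentinel entry. A userspace sketch of that table pattern (names here are illustrative, not kernel API):

/*
 * Sketch of the attribute-table pattern: a macro builds one entry with
 * show/store callbacks from a name, and the table ends in an all-NULL
 * sentinel playing the role of __ATTR_NULL.
 */
#include <stdio.h>

struct demo_attr {
	const char *name;
	int (*show)(char *buf, int len);
	int (*store)(const char *buf);
};

static int read_expire_show(char *buf, int len)
{ return snprintf(buf, len, "125\n"); }
static int read_expire_store(const char *buf)
{ return printf("set read_expire = %s", buf) < 0 ? -1 : 0; }

static int write_expire_show(char *buf, int len)
{ return snprintf(buf, len, "250\n"); }
static int write_expire_store(const char *buf)
{ return printf("set write_expire = %s", buf) < 0 ? -1 : 0; }

#define DEMO_ATTR(n)	{ #n, n##_show, n##_store }

static struct demo_attr demo_attrs[] = {
	DEMO_ATTR(read_expire),
	DEMO_ATTR(write_expire),
	{ NULL, NULL, NULL },			/* sentinel */
};

int main(void)
{
	char buf[64];
	struct demo_attr *a;

	for (a = demo_attrs; a->name; a++) {
		a->show(buf, sizeof(buf));
		printf("%s = %s", a->name, buf);
	}
	demo_attrs[0].store("200\n");
	return 0;
}
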
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index c8dbe38c81c8..c4a0d5d8d7f0 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -6,21 +6,13 @@
6 * 6 *
7 * Copyright (C) 2003 Jens Axboe <axboe@suse.de> 7 * Copyright (C) 2003 Jens Axboe <axboe@suse.de>
8 */ 8 */
9#include <linux/kernel.h>
10#include <linux/fs.h>
11#include <linux/blkdev.h>
12#include <linux/elevator.h>
13#include <linux/bio.h>
14#include <linux/config.h> 9#include <linux/config.h>
15#include <linux/module.h> 10#include <linux/module.h>
16#include <linux/slab.h> 11#include <linux/blkdev.h>
17#include <linux/init.h> 12#include <linux/elevator.h>
18#include <linux/compiler.h>
19#include <linux/hash.h> 13#include <linux/hash.h>
20#include <linux/rbtree.h> 14#include <linux/rbtree.h>
21#include <linux/mempool.h>
22#include <linux/ioprio.h> 15#include <linux/ioprio.h>
23#include <linux/writeback.h>
24 16
25/* 17/*
26 * tunables 18 * tunables
@@ -47,6 +39,8 @@ static int cfq_slice_idle = HZ / 100;
47 */ 39 */
48static const int cfq_max_depth = 2; 40static const int cfq_max_depth = 2;
49 41
42static DEFINE_RWLOCK(cfq_exit_lock);
43
50/* 44/*
51 * for the hash of cfqq inside the cfqd 45 * for the hash of cfqq inside the cfqd
52 */ 46 */
@@ -89,6 +83,9 @@ static kmem_cache_t *crq_pool;
89static kmem_cache_t *cfq_pool; 83static kmem_cache_t *cfq_pool;
90static kmem_cache_t *cfq_ioc_pool; 84static kmem_cache_t *cfq_ioc_pool;
91 85
86static atomic_t ioc_count = ATOMIC_INIT(0);
87static struct completion *ioc_gone;
88
92#define CFQ_PRIO_LISTS IOPRIO_BE_NR 89#define CFQ_PRIO_LISTS IOPRIO_BE_NR
93#define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE) 90#define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
94#define cfq_class_be(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_BE) 91#define cfq_class_be(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_BE)
@@ -109,7 +106,6 @@ static kmem_cache_t *cfq_ioc_pool;
109 * Per block device queue structure 106 * Per block device queue structure
110 */ 107 */
111struct cfq_data { 108struct cfq_data {
112 atomic_t ref;
113 request_queue_t *queue; 109 request_queue_t *queue;
114 110
115 /* 111 /*
@@ -175,6 +171,8 @@ struct cfq_data {
175 unsigned int cfq_slice_async_rq; 171 unsigned int cfq_slice_async_rq;
176 unsigned int cfq_slice_idle; 172 unsigned int cfq_slice_idle;
177 unsigned int cfq_max_depth; 173 unsigned int cfq_max_depth;
174
175 struct list_head cic_list;
178}; 176};
179 177
180/* 178/*
@@ -288,7 +286,7 @@ CFQ_CRQ_FNS(is_sync);
288 286
289static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short); 287static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
290static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *); 288static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *);
291static void cfq_put_cfqd(struct cfq_data *cfqd); 289static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, gfp_t gfp_mask);
292 290
293#define process_sync(tsk) ((tsk)->flags & PF_SYNCWRITE) 291#define process_sync(tsk) ((tsk)->flags & PF_SYNCWRITE)
294 292
@@ -1160,8 +1158,6 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
1160 if (unlikely(cfqd->active_queue == cfqq)) 1158 if (unlikely(cfqd->active_queue == cfqq))
1161 __cfq_slice_expired(cfqd, cfqq, 0); 1159 __cfq_slice_expired(cfqd, cfqq, 0);
1162 1160
1163 cfq_put_cfqd(cfqq->cfqd);
1164
1165 /* 1161 /*
1166 * it's on the empty list and still hashed 1162 * it's on the empty list and still hashed
1167 */ 1163 */
@@ -1179,7 +1175,7 @@ __cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio,
1179 1175
1180 hlist_for_each_safe(entry, next, hash_list) { 1176 hlist_for_each_safe(entry, next, hash_list) {
1181 struct cfq_queue *__cfqq = list_entry_qhash(entry); 1177 struct cfq_queue *__cfqq = list_entry_qhash(entry);
1182 const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->ioprio_class, __cfqq->ioprio); 1178 const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->org_ioprio_class, __cfqq->org_ioprio);
1183 1179
1184 if (__cfqq->key == key && (__p == prio || prio == CFQ_KEY_ANY)) 1180 if (__cfqq->key == key && (__p == prio || prio == CFQ_KEY_ANY))
1185 return __cfqq; 1181 return __cfqq;
@@ -1198,13 +1194,24 @@ static void cfq_free_io_context(struct cfq_io_context *cic)
1198{ 1194{
1199 struct cfq_io_context *__cic; 1195 struct cfq_io_context *__cic;
1200 struct list_head *entry, *next; 1196 struct list_head *entry, *next;
1197 int freed = 1;
1201 1198
1202 list_for_each_safe(entry, next, &cic->list) { 1199 list_for_each_safe(entry, next, &cic->list) {
1203 __cic = list_entry(entry, struct cfq_io_context, list); 1200 __cic = list_entry(entry, struct cfq_io_context, list);
1204 kmem_cache_free(cfq_ioc_pool, __cic); 1201 kmem_cache_free(cfq_ioc_pool, __cic);
1202 freed++;
1205 } 1203 }
1206 1204
1207 kmem_cache_free(cfq_ioc_pool, cic); 1205 kmem_cache_free(cfq_ioc_pool, cic);
1206 if (atomic_sub_and_test(freed, &ioc_count) && ioc_gone)
1207 complete(ioc_gone);
1208}
1209
1210static void cfq_trim(struct io_context *ioc)
1211{
1212 ioc->set_ioprio = NULL;
1213 if (ioc->cic)
1214 cfq_free_io_context(ioc->cic);
1208} 1215}
1209 1216
1210/* 1217/*
@@ -1212,25 +1219,37 @@ static void cfq_free_io_context(struct cfq_io_context *cic)
1212 */ 1219 */
1213static void cfq_exit_single_io_context(struct cfq_io_context *cic) 1220static void cfq_exit_single_io_context(struct cfq_io_context *cic)
1214{ 1221{
1215 struct cfq_data *cfqd = cic->cfqq->cfqd; 1222 struct cfq_data *cfqd = cic->key;
1216 request_queue_t *q = cfqd->queue; 1223 request_queue_t *q;
1224
1225 if (!cfqd)
1226 return;
1227
1228 q = cfqd->queue;
1217 1229
1218 WARN_ON(!irqs_disabled()); 1230 WARN_ON(!irqs_disabled());
1219 1231
1220 spin_lock(q->queue_lock); 1232 spin_lock(q->queue_lock);
1221 1233
1222 if (unlikely(cic->cfqq == cfqd->active_queue)) 1234 if (cic->cfqq[ASYNC]) {
1223 __cfq_slice_expired(cfqd, cic->cfqq, 0); 1235 if (unlikely(cic->cfqq[ASYNC] == cfqd->active_queue))
1236 __cfq_slice_expired(cfqd, cic->cfqq[ASYNC], 0);
1237 cfq_put_queue(cic->cfqq[ASYNC]);
1238 cic->cfqq[ASYNC] = NULL;
1239 }
1240
1241 if (cic->cfqq[SYNC]) {
1242 if (unlikely(cic->cfqq[SYNC] == cfqd->active_queue))
1243 __cfq_slice_expired(cfqd, cic->cfqq[SYNC], 0);
1244 cfq_put_queue(cic->cfqq[SYNC]);
1245 cic->cfqq[SYNC] = NULL;
1246 }
1224 1247
1225 cfq_put_queue(cic->cfqq); 1248 cic->key = NULL;
1226 cic->cfqq = NULL; 1249 list_del_init(&cic->queue_list);
1227 spin_unlock(q->queue_lock); 1250 spin_unlock(q->queue_lock);
1228} 1251}
1229 1252
1230/*
1231 * Another task may update the task cic list, if it is doing a queue lookup
1232 * on its behalf. cfq_cic_lock excludes such concurrent updates
1233 */
1234static void cfq_exit_io_context(struct cfq_io_context *cic) 1253static void cfq_exit_io_context(struct cfq_io_context *cic)
1235{ 1254{
1236 struct cfq_io_context *__cic; 1255 struct cfq_io_context *__cic;
@@ -1242,12 +1261,14 @@ static void cfq_exit_io_context(struct cfq_io_context *cic)
1242 /* 1261 /*
1243 * put the reference this task is holding to the various queues 1262 * put the reference this task is holding to the various queues
1244 */ 1263 */
1264 read_lock(&cfq_exit_lock);
1245 list_for_each(entry, &cic->list) { 1265 list_for_each(entry, &cic->list) {
1246 __cic = list_entry(entry, struct cfq_io_context, list); 1266 __cic = list_entry(entry, struct cfq_io_context, list);
1247 cfq_exit_single_io_context(__cic); 1267 cfq_exit_single_io_context(__cic);
1248 } 1268 }
1249 1269
1250 cfq_exit_single_io_context(cic); 1270 cfq_exit_single_io_context(cic);
1271 read_unlock(&cfq_exit_lock);
1251 local_irq_restore(flags); 1272 local_irq_restore(flags);
1252} 1273}
1253 1274
@@ -1258,7 +1279,8 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1258 1279
1259 if (cic) { 1280 if (cic) {
1260 INIT_LIST_HEAD(&cic->list); 1281 INIT_LIST_HEAD(&cic->list);
1261 cic->cfqq = NULL; 1282 cic->cfqq[ASYNC] = NULL;
1283 cic->cfqq[SYNC] = NULL;
1262 cic->key = NULL; 1284 cic->key = NULL;
1263 cic->last_end_request = jiffies; 1285 cic->last_end_request = jiffies;
1264 cic->ttime_total = 0; 1286 cic->ttime_total = 0;
@@ -1266,6 +1288,8 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1266 cic->ttime_mean = 0; 1288 cic->ttime_mean = 0;
1267 cic->dtor = cfq_free_io_context; 1289 cic->dtor = cfq_free_io_context;
1268 cic->exit = cfq_exit_io_context; 1290 cic->exit = cfq_exit_io_context;
1291 INIT_LIST_HEAD(&cic->queue_list);
1292 atomic_inc(&ioc_count);
1269 } 1293 }
1270 1294
1271 return cic; 1295 return cic;
@@ -1318,14 +1342,27 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq)
1318 cfq_clear_cfqq_prio_changed(cfqq); 1342 cfq_clear_cfqq_prio_changed(cfqq);
1319} 1343}
1320 1344
1321static inline void changed_ioprio(struct cfq_queue *cfqq) 1345static inline void changed_ioprio(struct cfq_io_context *cic)
1322{ 1346{
1323 if (cfqq) { 1347 struct cfq_data *cfqd = cic->key;
1324 struct cfq_data *cfqd = cfqq->cfqd; 1348 struct cfq_queue *cfqq;
1325 1349 if (cfqd) {
1326 spin_lock(cfqd->queue->queue_lock); 1350 spin_lock(cfqd->queue->queue_lock);
1327 cfq_mark_cfqq_prio_changed(cfqq); 1351 cfqq = cic->cfqq[ASYNC];
1328 cfq_init_prio_data(cfqq); 1352 if (cfqq) {
1353 struct cfq_queue *new_cfqq;
1354 new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC,
1355 cic->ioc->task, GFP_ATOMIC);
1356 if (new_cfqq) {
1357 cic->cfqq[ASYNC] = new_cfqq;
1358 cfq_put_queue(cfqq);
1359 }
1360 }
1361 cfqq = cic->cfqq[SYNC];
1362 if (cfqq) {
1363 cfq_mark_cfqq_prio_changed(cfqq);
1364 cfq_init_prio_data(cfqq);
1365 }
1329 spin_unlock(cfqd->queue->queue_lock); 1366 spin_unlock(cfqd->queue->queue_lock);
1330 } 1367 }
1331} 1368}
@@ -1335,24 +1372,32 @@ static inline void changed_ioprio(struct cfq_queue *cfqq)
1335 */ 1372 */
1336static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio) 1373static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio)
1337{ 1374{
1338 struct cfq_io_context *cic = ioc->cic; 1375 struct cfq_io_context *cic;
1376
1377 write_lock(&cfq_exit_lock);
1378
1379 cic = ioc->cic;
1339 1380
1340 changed_ioprio(cic->cfqq); 1381 changed_ioprio(cic);
1341 1382
1342 list_for_each_entry(cic, &cic->list, list) 1383 list_for_each_entry(cic, &cic->list, list)
1343 changed_ioprio(cic->cfqq); 1384 changed_ioprio(cic);
1385
1386 write_unlock(&cfq_exit_lock);
1344 1387
1345 return 0; 1388 return 0;
1346} 1389}
1347 1390
1348static struct cfq_queue * 1391static struct cfq_queue *
1349cfq_get_queue(struct cfq_data *cfqd, unsigned int key, unsigned short ioprio, 1392cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk,
1350 gfp_t gfp_mask) 1393 gfp_t gfp_mask)
1351{ 1394{
1352 const int hashval = hash_long(key, CFQ_QHASH_SHIFT); 1395 const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
1353 struct cfq_queue *cfqq, *new_cfqq = NULL; 1396 struct cfq_queue *cfqq, *new_cfqq = NULL;
1397 unsigned short ioprio;
1354 1398
1355retry: 1399retry:
1400 ioprio = tsk->ioprio;
1356 cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval); 1401 cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval);
1357 1402
1358 if (!cfqq) { 1403 if (!cfqq) {
@@ -1381,7 +1426,6 @@ retry:
1381 hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]); 1426 hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
1382 atomic_set(&cfqq->ref, 0); 1427 atomic_set(&cfqq->ref, 0);
1383 cfqq->cfqd = cfqd; 1428 cfqq->cfqd = cfqd;
1384 atomic_inc(&cfqd->ref);
1385 cfqq->service_last = 0; 1429 cfqq->service_last = 0;
1386 /* 1430 /*
1387 * set ->slice_left to allow preemption for a new process 1431 * set ->slice_left to allow preemption for a new process
@@ -1419,6 +1463,7 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask)
1419 if (!ioc) 1463 if (!ioc)
1420 return NULL; 1464 return NULL;
1421 1465
1466restart:
1422 if ((cic = ioc->cic) == NULL) { 1467 if ((cic = ioc->cic) == NULL) {
1423 cic = cfq_alloc_io_context(cfqd, gfp_mask); 1468 cic = cfq_alloc_io_context(cfqd, gfp_mask);
1424 1469
@@ -1429,11 +1474,13 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask)
1429 * manually increment generic io_context usage count, it 1474 * manually increment generic io_context usage count, it
1430 * cannot go away since we are already holding one ref to it 1475 * cannot go away since we are already holding one ref to it
1431 */ 1476 */
1432 ioc->cic = cic;
1433 ioc->set_ioprio = cfq_ioc_set_ioprio;
1434 cic->ioc = ioc; 1477 cic->ioc = ioc;
1435 cic->key = cfqd; 1478 cic->key = cfqd;
1436 atomic_inc(&cfqd->ref); 1479 read_lock(&cfq_exit_lock);
1480 ioc->set_ioprio = cfq_ioc_set_ioprio;
1481 ioc->cic = cic;
1482 list_add(&cic->queue_list, &cfqd->cic_list);
1483 read_unlock(&cfq_exit_lock);
1437 } else { 1484 } else {
1438 struct cfq_io_context *__cic; 1485 struct cfq_io_context *__cic;
1439 1486
@@ -1443,6 +1490,20 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask)
1443 if (cic->key == cfqd) 1490 if (cic->key == cfqd)
1444 goto out; 1491 goto out;
1445 1492
1493 if (unlikely(!cic->key)) {
1494 read_lock(&cfq_exit_lock);
1495 if (list_empty(&cic->list))
1496 ioc->cic = NULL;
1497 else
1498 ioc->cic = list_entry(cic->list.next,
1499 struct cfq_io_context,
1500 list);
1501 read_unlock(&cfq_exit_lock);
1502 kmem_cache_free(cfq_ioc_pool, cic);
1503 atomic_dec(&ioc_count);
1504 goto restart;
1505 }
1506
1446 /* 1507 /*
1447 * cic exists, check if we already are there. linear search 1508 * cic exists, check if we already are there. linear search
1448 * should be ok here, the list will usually not be more than 1509 * should be ok here, the list will usually not be more than
@@ -1457,6 +1518,14 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask)
1457 cic = __cic; 1518 cic = __cic;
1458 goto out; 1519 goto out;
1459 } 1520 }
1521 if (unlikely(!__cic->key)) {
1522 read_lock(&cfq_exit_lock);
1523 list_del(&__cic->list);
1524 read_unlock(&cfq_exit_lock);
1525 kmem_cache_free(cfq_ioc_pool, __cic);
1526 atomic_dec(&ioc_count);
1527 goto restart;
1528 }
1460 } 1529 }
1461 1530
1462 /* 1531 /*
@@ -1469,8 +1538,10 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask)
1469 1538
1470 __cic->ioc = ioc; 1539 __cic->ioc = ioc;
1471 __cic->key = cfqd; 1540 __cic->key = cfqd;
1472 atomic_inc(&cfqd->ref); 1541 read_lock(&cfq_exit_lock);
1473 list_add(&__cic->list, &cic->list); 1542 list_add(&__cic->list, &cic->list);
1543 list_add(&__cic->queue_list, &cfqd->cic_list);
1544 read_unlock(&cfq_exit_lock);
1474 cic = __cic; 1545 cic = __cic;
1475 } 1546 }
1476 1547
@@ -1890,6 +1961,7 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
1890 struct cfq_queue *cfqq; 1961 struct cfq_queue *cfqq;
1891 struct cfq_rq *crq; 1962 struct cfq_rq *crq;
1892 unsigned long flags; 1963 unsigned long flags;
1964 int is_sync = key != CFQ_KEY_ASYNC;
1893 1965
1894 might_sleep_if(gfp_mask & __GFP_WAIT); 1966 might_sleep_if(gfp_mask & __GFP_WAIT);
1895 1967
@@ -1900,14 +1972,14 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
1900 if (!cic) 1972 if (!cic)
1901 goto queue_fail; 1973 goto queue_fail;
1902 1974
1903 if (!cic->cfqq) { 1975 if (!cic->cfqq[is_sync]) {
1904 cfqq = cfq_get_queue(cfqd, key, tsk->ioprio, gfp_mask); 1976 cfqq = cfq_get_queue(cfqd, key, tsk, gfp_mask);
1905 if (!cfqq) 1977 if (!cfqq)
1906 goto queue_fail; 1978 goto queue_fail;
1907 1979
1908 cic->cfqq = cfqq; 1980 cic->cfqq[is_sync] = cfqq;
1909 } else 1981 } else
1910 cfqq = cic->cfqq; 1982 cfqq = cic->cfqq[is_sync];
1911 1983
1912 cfqq->allocated[rw]++; 1984 cfqq->allocated[rw]++;
1913 cfq_clear_cfqq_must_alloc(cfqq); 1985 cfq_clear_cfqq_must_alloc(cfqq);
@@ -1924,7 +1996,7 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
1924 crq->cfq_queue = cfqq; 1996 crq->cfq_queue = cfqq;
1925 crq->io_context = cic; 1997 crq->io_context = cic;
1926 1998
1927 if (rw == READ || process_sync(tsk)) 1999 if (is_sync)
1928 cfq_mark_crq_is_sync(crq); 2000 cfq_mark_crq_is_sync(crq);
1929 else 2001 else
1930 cfq_clear_crq_is_sync(crq); 2002 cfq_clear_crq_is_sync(crq);
@@ -2055,15 +2127,35 @@ static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
2055 blk_sync_queue(cfqd->queue); 2127 blk_sync_queue(cfqd->queue);
2056} 2128}
2057 2129
2058static void cfq_put_cfqd(struct cfq_data *cfqd) 2130static void cfq_exit_queue(elevator_t *e)
2059{ 2131{
2132 struct cfq_data *cfqd = e->elevator_data;
2060 request_queue_t *q = cfqd->queue; 2133 request_queue_t *q = cfqd->queue;
2061 2134
2062 if (!atomic_dec_and_test(&cfqd->ref)) 2135 cfq_shutdown_timer_wq(cfqd);
2063 return; 2136 write_lock(&cfq_exit_lock);
2137 spin_lock_irq(q->queue_lock);
2138 if (cfqd->active_queue)
2139 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
2140 while(!list_empty(&cfqd->cic_list)) {
2141 struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
2142 struct cfq_io_context,
2143 queue_list);
2144 if (cic->cfqq[ASYNC]) {
2145 cfq_put_queue(cic->cfqq[ASYNC]);
2146 cic->cfqq[ASYNC] = NULL;
2147 }
2148 if (cic->cfqq[SYNC]) {
2149 cfq_put_queue(cic->cfqq[SYNC]);
2150 cic->cfqq[SYNC] = NULL;
2151 }
2152 cic->key = NULL;
2153 list_del_init(&cic->queue_list);
2154 }
2155 spin_unlock_irq(q->queue_lock);
2156 write_unlock(&cfq_exit_lock);
2064 2157
2065 cfq_shutdown_timer_wq(cfqd); 2158 cfq_shutdown_timer_wq(cfqd);
2066 blk_put_queue(q);
2067 2159
2068 mempool_destroy(cfqd->crq_pool); 2160 mempool_destroy(cfqd->crq_pool);
2069 kfree(cfqd->crq_hash); 2161 kfree(cfqd->crq_hash);
@@ -2071,14 +2163,6 @@ static void cfq_put_cfqd(struct cfq_data *cfqd)
2071 kfree(cfqd); 2163 kfree(cfqd);
2072} 2164}
2073 2165
2074static void cfq_exit_queue(elevator_t *e)
2075{
2076 struct cfq_data *cfqd = e->elevator_data;
2077
2078 cfq_shutdown_timer_wq(cfqd);
2079 cfq_put_cfqd(cfqd);
2080}
2081
2082static int cfq_init_queue(request_queue_t *q, elevator_t *e) 2166static int cfq_init_queue(request_queue_t *q, elevator_t *e)
2083{ 2167{
2084 struct cfq_data *cfqd; 2168 struct cfq_data *cfqd;
@@ -2097,6 +2181,7 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
2097 INIT_LIST_HEAD(&cfqd->cur_rr); 2181 INIT_LIST_HEAD(&cfqd->cur_rr);
2098 INIT_LIST_HEAD(&cfqd->idle_rr); 2182 INIT_LIST_HEAD(&cfqd->idle_rr);
2099 INIT_LIST_HEAD(&cfqd->empty_list); 2183 INIT_LIST_HEAD(&cfqd->empty_list);
2184 INIT_LIST_HEAD(&cfqd->cic_list);
2100 2185
2101 cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL); 2186 cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL);
2102 if (!cfqd->crq_hash) 2187 if (!cfqd->crq_hash)
@@ -2118,7 +2203,6 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
2118 e->elevator_data = cfqd; 2203 e->elevator_data = cfqd;
2119 2204
2120 cfqd->queue = q; 2205 cfqd->queue = q;
2121 atomic_inc(&q->refcnt);
2122 2206
2123 cfqd->max_queued = q->nr_requests / 4; 2207 cfqd->max_queued = q->nr_requests / 4;
2124 q->nr_batching = cfq_queued; 2208 q->nr_batching = cfq_queued;
@@ -2133,8 +2217,6 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
2133 2217
2134 INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q); 2218 INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q);
2135 2219
2136 atomic_set(&cfqd->ref, 1);
2137
2138 cfqd->cfq_queued = cfq_queued; 2220 cfqd->cfq_queued = cfq_queued;
2139 cfqd->cfq_quantum = cfq_quantum; 2221 cfqd->cfq_quantum = cfq_quantum;
2140 cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; 2222 cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
@@ -2193,11 +2275,6 @@ fail:
2193/* 2275/*
2194 * sysfs parts below --> 2276 * sysfs parts below -->
2195 */ 2277 */
2196struct cfq_fs_entry {
2197 struct attribute attr;
2198 ssize_t (*show)(struct cfq_data *, char *);
2199 ssize_t (*store)(struct cfq_data *, const char *, size_t);
2200};
2201 2278
2202static ssize_t 2279static ssize_t
2203cfq_var_show(unsigned int var, char *page) 2280cfq_var_show(unsigned int var, char *page)
@@ -2215,8 +2292,9 @@ cfq_var_store(unsigned int *var, const char *page, size_t count)
2215} 2292}
2216 2293
2217#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ 2294#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
2218static ssize_t __FUNC(struct cfq_data *cfqd, char *page) \ 2295static ssize_t __FUNC(elevator_t *e, char *page) \
2219{ \ 2296{ \
2297 struct cfq_data *cfqd = e->elevator_data; \
2220 unsigned int __data = __VAR; \ 2298 unsigned int __data = __VAR; \
2221 if (__CONV) \ 2299 if (__CONV) \
2222 __data = jiffies_to_msecs(__data); \ 2300 __data = jiffies_to_msecs(__data); \
@@ -2226,8 +2304,8 @@ SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
2226SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued, 0); 2304SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued, 0);
2227SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1); 2305SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
2228SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1); 2306SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
2229SHOW_FUNCTION(cfq_back_max_show, cfqd->cfq_back_max, 0); 2307SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
2230SHOW_FUNCTION(cfq_back_penalty_show, cfqd->cfq_back_penalty, 0); 2308SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
2231SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1); 2309SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
2232SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); 2310SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
2233SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); 2311SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
@@ -2236,8 +2314,9 @@ SHOW_FUNCTION(cfq_max_depth_show, cfqd->cfq_max_depth, 0);
2236#undef SHOW_FUNCTION 2314#undef SHOW_FUNCTION
2237 2315
2238#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ 2316#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
2239static ssize_t __FUNC(struct cfq_data *cfqd, const char *page, size_t count) \ 2317static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \
2240{ \ 2318{ \
2319 struct cfq_data *cfqd = e->elevator_data; \
2241 unsigned int __data; \ 2320 unsigned int __data; \
2242 int ret = cfq_var_store(&__data, (page), count); \ 2321 int ret = cfq_var_store(&__data, (page), count); \
2243 if (__data < (MIN)) \ 2322 if (__data < (MIN)) \
@@ -2254,8 +2333,8 @@ STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
2254STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, UINT_MAX, 0); 2333STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, UINT_MAX, 0);
2255STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1); 2334STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1);
2256STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1); 2335STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1);
2257STORE_FUNCTION(cfq_back_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0); 2336STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
2258STORE_FUNCTION(cfq_back_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0); 2337STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
2259STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1); 2338STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
2260STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); 2339STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
2261STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); 2340STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
@@ -2263,112 +2342,22 @@ STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX,
2263STORE_FUNCTION(cfq_max_depth_store, &cfqd->cfq_max_depth, 1, UINT_MAX, 0); 2342STORE_FUNCTION(cfq_max_depth_store, &cfqd->cfq_max_depth, 1, UINT_MAX, 0);
2264#undef STORE_FUNCTION 2343#undef STORE_FUNCTION
2265 2344
2266static struct cfq_fs_entry cfq_quantum_entry = { 2345#define CFQ_ATTR(name) \
2267 .attr = {.name = "quantum", .mode = S_IRUGO | S_IWUSR }, 2346 __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
2268 .show = cfq_quantum_show, 2347
2269 .store = cfq_quantum_store, 2348static struct elv_fs_entry cfq_attrs[] = {
2270}; 2349 CFQ_ATTR(quantum),
2271static struct cfq_fs_entry cfq_queued_entry = { 2350 CFQ_ATTR(queued),
2272 .attr = {.name = "queued", .mode = S_IRUGO | S_IWUSR }, 2351 CFQ_ATTR(fifo_expire_sync),
2273 .show = cfq_queued_show, 2352 CFQ_ATTR(fifo_expire_async),
2274 .store = cfq_queued_store, 2353 CFQ_ATTR(back_seek_max),
2275}; 2354 CFQ_ATTR(back_seek_penalty),
2276static struct cfq_fs_entry cfq_fifo_expire_sync_entry = { 2355 CFQ_ATTR(slice_sync),
2277 .attr = {.name = "fifo_expire_sync", .mode = S_IRUGO | S_IWUSR }, 2356 CFQ_ATTR(slice_async),
2278 .show = cfq_fifo_expire_sync_show, 2357 CFQ_ATTR(slice_async_rq),
2279 .store = cfq_fifo_expire_sync_store, 2358 CFQ_ATTR(slice_idle),
2280}; 2359 CFQ_ATTR(max_depth),
2281static struct cfq_fs_entry cfq_fifo_expire_async_entry = { 2360 __ATTR_NULL
2282 .attr = {.name = "fifo_expire_async", .mode = S_IRUGO | S_IWUSR },
2283 .show = cfq_fifo_expire_async_show,
2284 .store = cfq_fifo_expire_async_store,
2285};
2286static struct cfq_fs_entry cfq_back_max_entry = {
2287 .attr = {.name = "back_seek_max", .mode = S_IRUGO | S_IWUSR },
2288 .show = cfq_back_max_show,
2289 .store = cfq_back_max_store,
2290};
2291static struct cfq_fs_entry cfq_back_penalty_entry = {
2292 .attr = {.name = "back_seek_penalty", .mode = S_IRUGO | S_IWUSR },
2293 .show = cfq_back_penalty_show,
2294 .store = cfq_back_penalty_store,
2295};
2296static struct cfq_fs_entry cfq_slice_sync_entry = {
2297 .attr = {.name = "slice_sync", .mode = S_IRUGO | S_IWUSR },
2298 .show = cfq_slice_sync_show,
2299 .store = cfq_slice_sync_store,
2300};
2301static struct cfq_fs_entry cfq_slice_async_entry = {
2302 .attr = {.name = "slice_async", .mode = S_IRUGO | S_IWUSR },
2303 .show = cfq_slice_async_show,
2304 .store = cfq_slice_async_store,
2305};
2306static struct cfq_fs_entry cfq_slice_async_rq_entry = {
2307 .attr = {.name = "slice_async_rq", .mode = S_IRUGO | S_IWUSR },
2308 .show = cfq_slice_async_rq_show,
2309 .store = cfq_slice_async_rq_store,
2310};
2311static struct cfq_fs_entry cfq_slice_idle_entry = {
2312 .attr = {.name = "slice_idle", .mode = S_IRUGO | S_IWUSR },
2313 .show = cfq_slice_idle_show,
2314 .store = cfq_slice_idle_store,
2315};
2316static struct cfq_fs_entry cfq_max_depth_entry = {
2317 .attr = {.name = "max_depth", .mode = S_IRUGO | S_IWUSR },
2318 .show = cfq_max_depth_show,
2319 .store = cfq_max_depth_store,
2320};
2321
2322static struct attribute *default_attrs[] = {
2323 &cfq_quantum_entry.attr,
2324 &cfq_queued_entry.attr,
2325 &cfq_fifo_expire_sync_entry.attr,
2326 &cfq_fifo_expire_async_entry.attr,
2327 &cfq_back_max_entry.attr,
2328 &cfq_back_penalty_entry.attr,
2329 &cfq_slice_sync_entry.attr,
2330 &cfq_slice_async_entry.attr,
2331 &cfq_slice_async_rq_entry.attr,
2332 &cfq_slice_idle_entry.attr,
2333 &cfq_max_depth_entry.attr,
2334 NULL,
2335};
2336
2337#define to_cfq(atr) container_of((atr), struct cfq_fs_entry, attr)
2338
2339static ssize_t
2340cfq_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
2341{
2342 elevator_t *e = container_of(kobj, elevator_t, kobj);
2343 struct cfq_fs_entry *entry = to_cfq(attr);
2344
2345 if (!entry->show)
2346 return -EIO;
2347
2348 return entry->show(e->elevator_data, page);
2349}
2350
2351static ssize_t
2352cfq_attr_store(struct kobject *kobj, struct attribute *attr,
2353 const char *page, size_t length)
2354{
2355 elevator_t *e = container_of(kobj, elevator_t, kobj);
2356 struct cfq_fs_entry *entry = to_cfq(attr);
2357
2358 if (!entry->store)
2359 return -EIO;
2360
2361 return entry->store(e->elevator_data, page, length);
2362}
2363
2364static struct sysfs_ops cfq_sysfs_ops = {
2365 .show = cfq_attr_show,
2366 .store = cfq_attr_store,
2367};
2368
2369static struct kobj_type cfq_ktype = {
2370 .sysfs_ops = &cfq_sysfs_ops,
2371 .default_attrs = default_attrs,
2372}; 2361};
2373 2362
2374static struct elevator_type iosched_cfq = { 2363static struct elevator_type iosched_cfq = {
@@ -2389,8 +2378,9 @@ static struct elevator_type iosched_cfq = {
2389 .elevator_may_queue_fn = cfq_may_queue, 2378 .elevator_may_queue_fn = cfq_may_queue,
2390 .elevator_init_fn = cfq_init_queue, 2379 .elevator_init_fn = cfq_init_queue,
2391 .elevator_exit_fn = cfq_exit_queue, 2380 .elevator_exit_fn = cfq_exit_queue,
2381 .trim = cfq_trim,
2392 }, 2382 },
2393 .elevator_ktype = &cfq_ktype, 2383 .elevator_attrs = cfq_attrs,
2394 .elevator_name = "cfq", 2384 .elevator_name = "cfq",
2395 .elevator_owner = THIS_MODULE, 2385 .elevator_owner = THIS_MODULE,
2396}; 2386};
@@ -2419,7 +2409,13 @@ static int __init cfq_init(void)
2419 2409
2420static void __exit cfq_exit(void) 2410static void __exit cfq_exit(void)
2421{ 2411{
2412 DECLARE_COMPLETION(all_gone);
2422 elv_unregister(&iosched_cfq); 2413 elv_unregister(&iosched_cfq);
2414 ioc_gone = &all_gone;
2415 barrier();
2416 if (atomic_read(&ioc_count))
2417 complete(ioc_gone);
2418 synchronize_rcu();
2423 cfq_slab_kill(); 2419 cfq_slab_kill();
2424} 2420}
2425 2421
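
Besides mirroring the ioc_count/ioc_gone teardown accounting from as-iosched, the cfq hunks above replace the single cic->cfqq pointer with a two-element array indexed by a sync/async flag, so one io context tracks one queue per direction and cfq_set_request binds the missing one lazily. A small sketch of that indexing (all types and names illustrative):

/*
 * Sketch of the per-direction queue split: one io context tracks two
 * queues, picked by a boolean is_sync index instead of a single pointer.
 */
#include <stdio.h>

enum { DEMO_ASYNC = 0, DEMO_SYNC = 1 };

struct demo_queue { const char *label; };

struct demo_context {
	struct demo_queue *cfqq[2];	/* [DEMO_ASYNC], [DEMO_SYNC] */
};

static struct demo_queue demo_queues[2] = { { "async" }, { "sync" } };

static struct demo_queue *demo_get_queue(struct demo_context *ctx, int is_sync)
{
	/* Bind the queue for this direction lazily, as the set_request path does. */
	if (!ctx->cfqq[is_sync])
		ctx->cfqq[is_sync] = &demo_queues[is_sync];
	return ctx->cfqq[is_sync];
}

int main(void)
{
	struct demo_context ctx = { { NULL, NULL } };
	int is_sync = 1;		/* e.g. a read request */

	printf("%s\n", demo_get_queue(&ctx, is_sync)->label);
	printf("%s\n", demo_get_queue(&ctx, DEMO_ASYNC)->label);
	return 0;
}
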
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 27e494b1bf97..399fa1e60e1f 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -694,11 +694,6 @@ deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
694/* 694/*
695 * sysfs parts below 695 * sysfs parts below
696 */ 696 */
697struct deadline_fs_entry {
698 struct attribute attr;
699 ssize_t (*show)(struct deadline_data *, char *);
700 ssize_t (*store)(struct deadline_data *, const char *, size_t);
701};
702 697
703static ssize_t 698static ssize_t
704deadline_var_show(int var, char *page) 699deadline_var_show(int var, char *page)
@@ -716,23 +711,25 @@ deadline_var_store(int *var, const char *page, size_t count)
716} 711}
717 712
718#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ 713#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
719static ssize_t __FUNC(struct deadline_data *dd, char *page) \ 714static ssize_t __FUNC(elevator_t *e, char *page) \
720{ \ 715{ \
721 int __data = __VAR; \ 716 struct deadline_data *dd = e->elevator_data; \
717 int __data = __VAR; \
722 if (__CONV) \ 718 if (__CONV) \
723 __data = jiffies_to_msecs(__data); \ 719 __data = jiffies_to_msecs(__data); \
724 return deadline_var_show(__data, (page)); \ 720 return deadline_var_show(__data, (page)); \
725} 721}
726SHOW_FUNCTION(deadline_readexpire_show, dd->fifo_expire[READ], 1); 722SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
727SHOW_FUNCTION(deadline_writeexpire_show, dd->fifo_expire[WRITE], 1); 723SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
728SHOW_FUNCTION(deadline_writesstarved_show, dd->writes_starved, 0); 724SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
729SHOW_FUNCTION(deadline_frontmerges_show, dd->front_merges, 0); 725SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
730SHOW_FUNCTION(deadline_fifobatch_show, dd->fifo_batch, 0); 726SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
731#undef SHOW_FUNCTION 727#undef SHOW_FUNCTION
732 728
733#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ 729#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
734static ssize_t __FUNC(struct deadline_data *dd, const char *page, size_t count) \ 730static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \
735{ \ 731{ \
732 struct deadline_data *dd = e->elevator_data; \
736 int __data; \ 733 int __data; \
737 int ret = deadline_var_store(&__data, (page), count); \ 734 int ret = deadline_var_store(&__data, (page), count); \
738 if (__data < (MIN)) \ 735 if (__data < (MIN)) \
@@ -745,83 +742,24 @@ static ssize_t __FUNC(struct deadline_data *dd, const char *page, size_t count)
745 *(__PTR) = __data; \ 742 *(__PTR) = __data; \
746 return ret; \ 743 return ret; \
747} 744}
748STORE_FUNCTION(deadline_readexpire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1); 745STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
749STORE_FUNCTION(deadline_writeexpire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1); 746STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
750STORE_FUNCTION(deadline_writesstarved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0); 747STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
751STORE_FUNCTION(deadline_frontmerges_store, &dd->front_merges, 0, 1, 0); 748STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
752STORE_FUNCTION(deadline_fifobatch_store, &dd->fifo_batch, 0, INT_MAX, 0); 749STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
753#undef STORE_FUNCTION 750#undef STORE_FUNCTION
754 751
755static struct deadline_fs_entry deadline_readexpire_entry = { 752#define DD_ATTR(name) \
756 .attr = {.name = "read_expire", .mode = S_IRUGO | S_IWUSR }, 753 __ATTR(name, S_IRUGO|S_IWUSR, deadline_##name##_show, \
757 .show = deadline_readexpire_show, 754 deadline_##name##_store)
758 .store = deadline_readexpire_store, 755
759}; 756static struct elv_fs_entry deadline_attrs[] = {
760static struct deadline_fs_entry deadline_writeexpire_entry = { 757 DD_ATTR(read_expire),
761 .attr = {.name = "write_expire", .mode = S_IRUGO | S_IWUSR }, 758 DD_ATTR(write_expire),
762 .show = deadline_writeexpire_show, 759 DD_ATTR(writes_starved),
763 .store = deadline_writeexpire_store, 760 DD_ATTR(front_merges),
764}; 761 DD_ATTR(fifo_batch),
765static struct deadline_fs_entry deadline_writesstarved_entry = { 762 __ATTR_NULL
766 .attr = {.name = "writes_starved", .mode = S_IRUGO | S_IWUSR },
767 .show = deadline_writesstarved_show,
768 .store = deadline_writesstarved_store,
769};
770static struct deadline_fs_entry deadline_frontmerges_entry = {
771 .attr = {.name = "front_merges", .mode = S_IRUGO | S_IWUSR },
772 .show = deadline_frontmerges_show,
773 .store = deadline_frontmerges_store,
774};
775static struct deadline_fs_entry deadline_fifobatch_entry = {
776 .attr = {.name = "fifo_batch", .mode = S_IRUGO | S_IWUSR },
777 .show = deadline_fifobatch_show,
778 .store = deadline_fifobatch_store,
779};
780
781static struct attribute *default_attrs[] = {
782 &deadline_readexpire_entry.attr,
783 &deadline_writeexpire_entry.attr,
784 &deadline_writesstarved_entry.attr,
785 &deadline_frontmerges_entry.attr,
786 &deadline_fifobatch_entry.attr,
787 NULL,
788};
789
790#define to_deadline(atr) container_of((atr), struct deadline_fs_entry, attr)
791
792static ssize_t
793deadline_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
794{
795 elevator_t *e = container_of(kobj, elevator_t, kobj);
796 struct deadline_fs_entry *entry = to_deadline(attr);
797
798 if (!entry->show)
799 return -EIO;
800
801 return entry->show(e->elevator_data, page);
802}
803
804static ssize_t
805deadline_attr_store(struct kobject *kobj, struct attribute *attr,
806 const char *page, size_t length)
807{
808 elevator_t *e = container_of(kobj, elevator_t, kobj);
809 struct deadline_fs_entry *entry = to_deadline(attr);
810
811 if (!entry->store)
812 return -EIO;
813
814 return entry->store(e->elevator_data, page, length);
815}
816
817static struct sysfs_ops deadline_sysfs_ops = {
818 .show = deadline_attr_show,
819 .store = deadline_attr_store,
820};
821
822static struct kobj_type deadline_ktype = {
823 .sysfs_ops = &deadline_sysfs_ops,
824 .default_attrs = default_attrs,
825}; 763};
826 764
827static struct elevator_type iosched_deadline = { 765static struct elevator_type iosched_deadline = {
@@ -840,7 +778,7 @@ static struct elevator_type iosched_deadline = {
840 .elevator_exit_fn = deadline_exit_queue, 778 .elevator_exit_fn = deadline_exit_queue,
841 }, 779 },
842 780
843 .elevator_ktype = &deadline_ktype, 781 .elevator_attrs = deadline_attrs,
844 .elevator_name = "deadline", 782 .elevator_name = "deadline",
845 .elevator_owner = THIS_MODULE, 783 .elevator_owner = THIS_MODULE,
846}; 784};
diff --git a/block/elevator.c b/block/elevator.c
index 24b702d649a9..db3d0d8296a0 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -120,15 +120,10 @@ static struct elevator_type *elevator_get(const char *name)
120 return e; 120 return e;
121} 121}
122 122
123static int elevator_attach(request_queue_t *q, struct elevator_type *e, 123static int elevator_attach(request_queue_t *q, struct elevator_queue *eq)
124 struct elevator_queue *eq)
125{ 124{
126 int ret = 0; 125 int ret = 0;
127 126
128 memset(eq, 0, sizeof(*eq));
129 eq->ops = &e->ops;
130 eq->elevator_type = e;
131
132 q->elevator = eq; 127 q->elevator = eq;
133 128
134 if (eq->ops->elevator_init_fn) 129 if (eq->ops->elevator_init_fn)
@@ -154,6 +149,32 @@ static int __init elevator_setup(char *str)
154 149
155__setup("elevator=", elevator_setup); 150__setup("elevator=", elevator_setup);
156 151
152static struct kobj_type elv_ktype;
153
154static elevator_t *elevator_alloc(struct elevator_type *e)
155{
156 elevator_t *eq = kmalloc(sizeof(elevator_t), GFP_KERNEL);
157 if (eq) {
158 memset(eq, 0, sizeof(*eq));
159 eq->ops = &e->ops;
160 eq->elevator_type = e;
161 kobject_init(&eq->kobj);
162 snprintf(eq->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
163 eq->kobj.ktype = &elv_ktype;
164 mutex_init(&eq->sysfs_lock);
165 } else {
166 elevator_put(e);
167 }
168 return eq;
169}
170
171static void elevator_release(struct kobject *kobj)
172{
173 elevator_t *e = container_of(kobj, elevator_t, kobj);
174 elevator_put(e->elevator_type);
175 kfree(e);
176}
177
157int elevator_init(request_queue_t *q, char *name) 178int elevator_init(request_queue_t *q, char *name)
158{ 179{
159 struct elevator_type *e = NULL; 180 struct elevator_type *e = NULL;
@@ -176,29 +197,26 @@ int elevator_init(request_queue_t *q, char *name)
176 e = elevator_get("noop"); 197 e = elevator_get("noop");
177 } 198 }
178 199
179 eq = kmalloc(sizeof(struct elevator_queue), GFP_KERNEL); 200 eq = elevator_alloc(e);
180 if (!eq) { 201 if (!eq)
181 elevator_put(e);
182 return -ENOMEM; 202 return -ENOMEM;
183 }
184 203
185 ret = elevator_attach(q, e, eq); 204 ret = elevator_attach(q, eq);
186 if (ret) { 205 if (ret)
187 kfree(eq); 206 kobject_put(&eq->kobj);
188 elevator_put(e);
189 }
190 207
191 return ret; 208 return ret;
192} 209}
193 210
194void elevator_exit(elevator_t *e) 211void elevator_exit(elevator_t *e)
195{ 212{
213 mutex_lock(&e->sysfs_lock);
196 if (e->ops->elevator_exit_fn) 214 if (e->ops->elevator_exit_fn)
197 e->ops->elevator_exit_fn(e); 215 e->ops->elevator_exit_fn(e);
216 e->ops = NULL;
217 mutex_unlock(&e->sysfs_lock);
198 218
199 elevator_put(e->elevator_type); 219 kobject_put(&e->kobj);
200 e->elevator_type = NULL;
201 kfree(e);
202} 220}
203 221
204/* 222/*
@@ -627,26 +645,79 @@ void elv_completed_request(request_queue_t *q, struct request *rq)
627 } 645 }
628} 646}
629 647
630int elv_register_queue(struct request_queue *q) 648#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
649
650static ssize_t
651elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
631{ 652{
632 elevator_t *e = q->elevator; 653 elevator_t *e = container_of(kobj, elevator_t, kobj);
654 struct elv_fs_entry *entry = to_elv(attr);
655 ssize_t error;
633 656
634 e->kobj.parent = kobject_get(&q->kobj); 657 if (!entry->show)
635 if (!e->kobj.parent) 658 return -EIO;
636 return -EBUSY;
637 659
638 snprintf(e->kobj.name, KOBJ_NAME_LEN, "%s", "iosched"); 660 mutex_lock(&e->sysfs_lock);
639 e->kobj.ktype = e->elevator_type->elevator_ktype; 661 error = e->ops ? entry->show(e, page) : -ENOENT;
662 mutex_unlock(&e->sysfs_lock);
663 return error;
664}
665
666static ssize_t
667elv_attr_store(struct kobject *kobj, struct attribute *attr,
668 const char *page, size_t length)
669{
670 elevator_t *e = container_of(kobj, elevator_t, kobj);
671 struct elv_fs_entry *entry = to_elv(attr);
672 ssize_t error;
673
674 if (!entry->store)
675 return -EIO;
676
677 mutex_lock(&e->sysfs_lock);
678 error = e->ops ? entry->store(e, page, length) : -ENOENT;
679 mutex_unlock(&e->sysfs_lock);
680 return error;
681}
682
683static struct sysfs_ops elv_sysfs_ops = {
684 .show = elv_attr_show,
685 .store = elv_attr_store,
686};
687
688static struct kobj_type elv_ktype = {
689 .sysfs_ops = &elv_sysfs_ops,
690 .release = elevator_release,
691};
640 692
641 return kobject_register(&e->kobj); 693int elv_register_queue(struct request_queue *q)
694{
695 elevator_t *e = q->elevator;
696 int error;
697
698 e->kobj.parent = &q->kobj;
699
700 error = kobject_add(&e->kobj);
701 if (!error) {
702 struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
703 if (attr) {
704 while (attr->attr.name) {
705 if (sysfs_create_file(&e->kobj, &attr->attr))
706 break;
707 attr++;
708 }
709 }
710 kobject_uevent(&e->kobj, KOBJ_ADD);
711 }
712 return error;
642} 713}
643 714
644void elv_unregister_queue(struct request_queue *q) 715void elv_unregister_queue(struct request_queue *q)
645{ 716{
646 if (q) { 717 if (q) {
647 elevator_t *e = q->elevator; 718 elevator_t *e = q->elevator;
648 kobject_unregister(&e->kobj); 719 kobject_uevent(&e->kobj, KOBJ_REMOVE);
649 kobject_put(&q->kobj); 720 kobject_del(&e->kobj);
650 } 721 }
651} 722}
652 723
@@ -675,21 +746,15 @@ void elv_unregister(struct elevator_type *e)
675 /* 746 /*
676 * Iterate every thread in the process to remove the io contexts. 747 * Iterate every thread in the process to remove the io contexts.
677 */ 748 */
678 read_lock(&tasklist_lock); 749 if (e->ops.trim) {
679 do_each_thread(g, p) { 750 read_lock(&tasklist_lock);
680 struct io_context *ioc = p->io_context; 751 do_each_thread(g, p) {
681 if (ioc && ioc->cic) { 752 task_lock(p);
682 ioc->cic->exit(ioc->cic); 753 e->ops.trim(p->io_context);
683 ioc->cic->dtor(ioc->cic); 754 task_unlock(p);
684 ioc->cic = NULL; 755 } while_each_thread(g, p);
685 } 756 read_unlock(&tasklist_lock);
686 if (ioc && ioc->aic) { 757 }
687 ioc->aic->exit(ioc->aic);
688 ioc->aic->dtor(ioc->aic);
689 ioc->aic = NULL;
690 }
691 } while_each_thread(g, p);
692 read_unlock(&tasklist_lock);
693 758
694 spin_lock_irq(&elv_list_lock); 759 spin_lock_irq(&elv_list_lock);
695 list_del_init(&e->list); 760 list_del_init(&e->list);
@@ -703,16 +768,16 @@ EXPORT_SYMBOL_GPL(elv_unregister);
703 * need for the new one. this way we have a chance of going back to the old 768 * need for the new one. this way we have a chance of going back to the old
704 * one, if the new one fails init for some reason. 769 * one, if the new one fails init for some reason.
705 */ 770 */
706static void elevator_switch(request_queue_t *q, struct elevator_type *new_e) 771static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
707{ 772{
708 elevator_t *old_elevator, *e; 773 elevator_t *old_elevator, *e;
709 774
710 /* 775 /*
711 * Allocate new elevator 776 * Allocate new elevator
712 */ 777 */
713 e = kmalloc(sizeof(elevator_t), GFP_KERNEL); 778 e = elevator_alloc(new_e);
714 if (!e) 779 if (!e)
715 goto error; 780 return 0;
716 781
717 /* 782 /*
718 * Turn on BYPASS and drain all requests w/ elevator private data 783 * Turn on BYPASS and drain all requests w/ elevator private data
@@ -743,7 +808,7 @@ static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
743 /* 808 /*
744 * attach and start new elevator 809 * attach and start new elevator
745 */ 810 */
746 if (elevator_attach(q, new_e, e)) 811 if (elevator_attach(q, e))
747 goto fail; 812 goto fail;
748 813
749 if (elv_register_queue(q)) 814 if (elv_register_queue(q))
@@ -754,7 +819,7 @@ static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
754 */ 819 */
755 elevator_exit(old_elevator); 820 elevator_exit(old_elevator);
756 clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); 821 clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
757 return; 822 return 1;
758 823
759fail_register: 824fail_register:
760 /* 825 /*
@@ -767,10 +832,9 @@ fail:
767 q->elevator = old_elevator; 832 q->elevator = old_elevator;
768 elv_register_queue(q); 833 elv_register_queue(q);
769 clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); 834 clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
770 kfree(e); 835 if (e)
771error: 836 kobject_put(&e->kobj);
772 elevator_put(new_e); 837 return 0;
773 printk(KERN_ERR "elevator: switch to %s failed\n",new_e->elevator_name);
774} 838}
775 839
776ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count) 840ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
@@ -797,7 +861,8 @@ ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
797 return count; 861 return count;
798 } 862 }
799 863
800 elevator_switch(q, e); 864 if (!elevator_switch(q, e))
865 printk(KERN_ERR "elevator: switch to %s failed\n",elevator_name);
801 return count; 866 return count;
802} 867}
803 868
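
With the elevator.c changes above, the elevator core owns the sysfs plumbing each scheduler used to duplicate: elv_attr_show/elv_attr_store recover the elevator_t from the embedded kobject with container_of, take sysfs_lock, and refuse with -ENOENT once ->ops has been cleared by elevator_exit. A userspace sketch of that dispatch shape, assuming a pthread mutex in place of the kernel mutex and a plain error code in place of -ENOENT:

/*
 * Sketch of the container_of-based attribute dispatch: recover the
 * containing object from a pointer to an embedded member, then call
 * the per-attribute hook under a lock, refusing once ops is torn down.
 */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_kobj { const char *name; };

struct demo_elevator {
	struct demo_kobj kobj;		/* embedded member */
	const char *ops;		/* NULL once "exited" */
	pthread_mutex_t sysfs_lock;
};

static int demo_attr_show(struct demo_kobj *kobj, char *buf, int len)
{
	struct demo_elevator *e = container_of(kobj, struct demo_elevator, kobj);
	int ret;

	pthread_mutex_lock(&e->sysfs_lock);
	ret = e->ops ? snprintf(buf, len, "%s/%s\n", e->ops, kobj->name)
		     : -1;		/* stands in for -ENOENT */
	pthread_mutex_unlock(&e->sysfs_lock);
	return ret;
}

int main(void)
{
	struct demo_elevator e = {
		{ "iosched" }, "cfq", PTHREAD_MUTEX_INITIALIZER
	};
	char buf[32];

	if (demo_attr_show(&e.kobj, buf, sizeof(buf)) > 0)
		fputs(buf, stdout);

	e.ops = NULL;			/* simulate elevator_exit() */
	printf("%d\n", demo_attr_show(&e.kobj, buf, sizeof(buf)));
	return 0;
}
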
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 0ef2971a9e82..6c793b196aa9 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1740,16 +1740,11 @@ EXPORT_SYMBOL(blk_run_queue);
1740 * Hopefully the low level driver will have finished any 1740 * Hopefully the low level driver will have finished any
1741 * outstanding requests first... 1741 * outstanding requests first...
1742 **/ 1742 **/
1743void blk_cleanup_queue(request_queue_t * q) 1743static void blk_release_queue(struct kobject *kobj)
1744{ 1744{
1745 request_queue_t *q = container_of(kobj, struct request_queue, kobj);
1745 struct request_list *rl = &q->rq; 1746 struct request_list *rl = &q->rq;
1746 1747
1747 if (!atomic_dec_and_test(&q->refcnt))
1748 return;
1749
1750 if (q->elevator)
1751 elevator_exit(q->elevator);
1752
1753 blk_sync_queue(q); 1748 blk_sync_queue(q);
1754 1749
1755 if (rl->rq_pool) 1750 if (rl->rq_pool)
@@ -1761,6 +1756,24 @@ void blk_cleanup_queue(request_queue_t * q)
1761 kmem_cache_free(requestq_cachep, q); 1756 kmem_cache_free(requestq_cachep, q);
1762} 1757}
1763 1758
1759void blk_put_queue(request_queue_t *q)
1760{
1761 kobject_put(&q->kobj);
1762}
1763EXPORT_SYMBOL(blk_put_queue);
1764
1765void blk_cleanup_queue(request_queue_t * q)
1766{
1767 mutex_lock(&q->sysfs_lock);
1768 set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
1769 mutex_unlock(&q->sysfs_lock);
1770
1771 if (q->elevator)
1772 elevator_exit(q->elevator);
1773
1774 blk_put_queue(q);
1775}
1776
1764EXPORT_SYMBOL(blk_cleanup_queue); 1777EXPORT_SYMBOL(blk_cleanup_queue);
1765 1778
1766static int blk_init_free_list(request_queue_t *q) 1779static int blk_init_free_list(request_queue_t *q)
@@ -1788,6 +1801,8 @@ request_queue_t *blk_alloc_queue(gfp_t gfp_mask)
1788} 1801}
1789EXPORT_SYMBOL(blk_alloc_queue); 1802EXPORT_SYMBOL(blk_alloc_queue);
1790 1803
1804static struct kobj_type queue_ktype;
1805
1791request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) 1806request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
1792{ 1807{
1793 request_queue_t *q; 1808 request_queue_t *q;
@@ -1798,11 +1813,16 @@ request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
1798 1813
1799 memset(q, 0, sizeof(*q)); 1814 memset(q, 0, sizeof(*q));
1800 init_timer(&q->unplug_timer); 1815 init_timer(&q->unplug_timer);
1801 atomic_set(&q->refcnt, 1); 1816
1817 snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
1818 q->kobj.ktype = &queue_ktype;
1819 kobject_init(&q->kobj);
1802 1820
1803 q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug; 1821 q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
1804 q->backing_dev_info.unplug_io_data = q; 1822 q->backing_dev_info.unplug_io_data = q;
1805 1823
1824 mutex_init(&q->sysfs_lock);
1825
1806 return q; 1826 return q;
1807} 1827}
1808EXPORT_SYMBOL(blk_alloc_queue_node); 1828EXPORT_SYMBOL(blk_alloc_queue_node);
@@ -1854,8 +1874,10 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
1854 return NULL; 1874 return NULL;
1855 1875
1856 q->node = node_id; 1876 q->node = node_id;
1857 if (blk_init_free_list(q)) 1877 if (blk_init_free_list(q)) {
1858 goto out_init; 1878 kmem_cache_free(requestq_cachep, q);
1879 return NULL;
1880 }
1859 1881
1860 /* 1882 /*
1861 * if caller didn't supply a lock, they get per-queue locking with 1883 * if caller didn't supply a lock, they get per-queue locking with
@@ -1891,9 +1913,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
1891 return q; 1913 return q;
1892 } 1914 }
1893 1915
1894 blk_cleanup_queue(q); 1916 blk_put_queue(q);
1895out_init:
1896 kmem_cache_free(requestq_cachep, q);
1897 return NULL; 1917 return NULL;
1898} 1918}
1899EXPORT_SYMBOL(blk_init_queue_node); 1919EXPORT_SYMBOL(blk_init_queue_node);
@@ -1901,7 +1921,7 @@ EXPORT_SYMBOL(blk_init_queue_node);
1901int blk_get_queue(request_queue_t *q) 1921int blk_get_queue(request_queue_t *q)
1902{ 1922{
1903 if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { 1923 if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
1904 atomic_inc(&q->refcnt); 1924 kobject_get(&q->kobj);
1905 return 0; 1925 return 0;
1906 } 1926 }
1907 1927
@@ -3477,10 +3497,12 @@ void put_io_context(struct io_context *ioc)
3477 BUG_ON(atomic_read(&ioc->refcount) == 0); 3497 BUG_ON(atomic_read(&ioc->refcount) == 0);
3478 3498
3479 if (atomic_dec_and_test(&ioc->refcount)) { 3499 if (atomic_dec_and_test(&ioc->refcount)) {
3500 rcu_read_lock();
3480 if (ioc->aic && ioc->aic->dtor) 3501 if (ioc->aic && ioc->aic->dtor)
3481 ioc->aic->dtor(ioc->aic); 3502 ioc->aic->dtor(ioc->aic);
3482 if (ioc->cic && ioc->cic->dtor) 3503 if (ioc->cic && ioc->cic->dtor)
3483 ioc->cic->dtor(ioc->cic); 3504 ioc->cic->dtor(ioc->cic);
3505 rcu_read_unlock();
3484 3506
3485 kmem_cache_free(iocontext_cachep, ioc); 3507 kmem_cache_free(iocontext_cachep, ioc);
3486 } 3508 }
@@ -3614,10 +3636,13 @@ static ssize_t
3614queue_requests_store(struct request_queue *q, const char *page, size_t count) 3636queue_requests_store(struct request_queue *q, const char *page, size_t count)
3615{ 3637{
3616 struct request_list *rl = &q->rq; 3638 struct request_list *rl = &q->rq;
3639 unsigned long nr;
3640 int ret = queue_var_store(&nr, page, count);
3641 if (nr < BLKDEV_MIN_RQ)
3642 nr = BLKDEV_MIN_RQ;
3617 3643
3618 int ret = queue_var_store(&q->nr_requests, page, count); 3644 spin_lock_irq(q->queue_lock);
3619 if (q->nr_requests < BLKDEV_MIN_RQ) 3645 q->nr_requests = nr;
3620 q->nr_requests = BLKDEV_MIN_RQ;
3621 blk_queue_congestion_threshold(q); 3646 blk_queue_congestion_threshold(q);
3622 3647
3623 if (rl->count[READ] >= queue_congestion_on_threshold(q)) 3648 if (rl->count[READ] >= queue_congestion_on_threshold(q))
@@ -3643,6 +3668,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
3643 blk_clear_queue_full(q, WRITE); 3668 blk_clear_queue_full(q, WRITE);
3644 wake_up(&rl->wait[WRITE]); 3669 wake_up(&rl->wait[WRITE]);
3645 } 3670 }
3671 spin_unlock_irq(q->queue_lock);
3646 return ret; 3672 return ret;
3647} 3673}
3648 3674
@@ -3758,13 +3784,19 @@ static ssize_t
3758queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 3784queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3759{ 3785{
3760 struct queue_sysfs_entry *entry = to_queue(attr); 3786 struct queue_sysfs_entry *entry = to_queue(attr);
3761 struct request_queue *q; 3787 request_queue_t *q = container_of(kobj, struct request_queue, kobj);
3788 ssize_t res;
3762 3789
3763 q = container_of(kobj, struct request_queue, kobj);
3764 if (!entry->show) 3790 if (!entry->show)
3765 return -EIO; 3791 return -EIO;
3766 3792 mutex_lock(&q->sysfs_lock);
3767 return entry->show(q, page); 3793 if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
3794 mutex_unlock(&q->sysfs_lock);
3795 return -ENOENT;
3796 }
3797 res = entry->show(q, page);
3798 mutex_unlock(&q->sysfs_lock);
3799 return res;
3768} 3800}
3769 3801
3770static ssize_t 3802static ssize_t
@@ -3772,13 +3804,20 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
3772 const char *page, size_t length) 3804 const char *page, size_t length)
3773{ 3805{
3774 struct queue_sysfs_entry *entry = to_queue(attr); 3806 struct queue_sysfs_entry *entry = to_queue(attr);
3775 struct request_queue *q; 3807 request_queue_t *q = container_of(kobj, struct request_queue, kobj);
3808
3809 ssize_t res;
3776 3810
3777 q = container_of(kobj, struct request_queue, kobj);
3778 if (!entry->store) 3811 if (!entry->store)
3779 return -EIO; 3812 return -EIO;
3780 3813 mutex_lock(&q->sysfs_lock);
3781 return entry->store(q, page, length); 3814 if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
3815 mutex_unlock(&q->sysfs_lock);
3816 return -ENOENT;
3817 }
3818 res = entry->store(q, page, length);
3819 mutex_unlock(&q->sysfs_lock);
3820 return res;
3782} 3821}
3783 3822
3784static struct sysfs_ops queue_sysfs_ops = { 3823static struct sysfs_ops queue_sysfs_ops = {
@@ -3789,6 +3828,7 @@ static struct sysfs_ops queue_sysfs_ops = {
3789static struct kobj_type queue_ktype = { 3828static struct kobj_type queue_ktype = {
3790 .sysfs_ops = &queue_sysfs_ops, 3829 .sysfs_ops = &queue_sysfs_ops,
3791 .default_attrs = default_attrs, 3830 .default_attrs = default_attrs,
3831 .release = blk_release_queue,
3792}; 3832};
3793 3833
3794int blk_register_queue(struct gendisk *disk) 3834int blk_register_queue(struct gendisk *disk)
@@ -3801,19 +3841,17 @@ int blk_register_queue(struct gendisk *disk)
3801 return -ENXIO; 3841 return -ENXIO;
3802 3842
3803 q->kobj.parent = kobject_get(&disk->kobj); 3843 q->kobj.parent = kobject_get(&disk->kobj);
3804 if (!q->kobj.parent)
3805 return -EBUSY;
3806 3844
3807 snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue"); 3845 ret = kobject_add(&q->kobj);
3808 q->kobj.ktype = &queue_ktype;
3809
3810 ret = kobject_register(&q->kobj);
3811 if (ret < 0) 3846 if (ret < 0)
3812 return ret; 3847 return ret;
3813 3848
3849 kobject_uevent(&q->kobj, KOBJ_ADD);
3850
3814 ret = elv_register_queue(q); 3851 ret = elv_register_queue(q);
3815 if (ret) { 3852 if (ret) {
3816 kobject_unregister(&q->kobj); 3853 kobject_uevent(&q->kobj, KOBJ_REMOVE);
3854 kobject_del(&q->kobj);
3817 return ret; 3855 return ret;
3818 } 3856 }
3819 3857
@@ -3827,7 +3865,8 @@ void blk_unregister_queue(struct gendisk *disk)
3827 if (q && q->request_fn) { 3865 if (q && q->request_fn) {
3828 elv_unregister_queue(q); 3866 elv_unregister_queue(q);
3829 3867
3830 kobject_unregister(&q->kobj); 3868 kobject_uevent(&q->kobj, KOBJ_REMOVE);
3869 kobject_del(&q->kobj);
3831 kobject_put(&disk->kobj); 3870 kobject_put(&disk->kobj);
3832 } 3871 }
3833} 3872}
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 5f6d1a5cce11..0010704739e3 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1307,7 +1307,7 @@ static int __init loop_init(void)
1307 1307
1308out_mem4: 1308out_mem4:
1309 while (i--) 1309 while (i--)
1310 blk_put_queue(loop_dev[i].lo_queue); 1310 blk_cleanup_queue(loop_dev[i].lo_queue);
1311 devfs_remove("loop"); 1311 devfs_remove("loop");
1312 i = max_loop; 1312 i = max_loop;
1313out_mem3: 1313out_mem3:
@@ -1328,7 +1328,7 @@ static void loop_exit(void)
1328 1328
1329 for (i = 0; i < max_loop; i++) { 1329 for (i = 0; i < max_loop; i++) {
1330 del_gendisk(disks[i]); 1330 del_gendisk(disks[i]);
1331 blk_put_queue(loop_dev[i].lo_queue); 1331 blk_cleanup_queue(loop_dev[i].lo_queue);
1332 put_disk(disks[i]); 1332 put_disk(disks[i]);
1333 } 1333 }
1334 devfs_remove("loop"); 1334 devfs_remove("loop");
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index bc9b2bcd7dba..476a5b553f34 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2514,7 +2514,7 @@ static int pkt_setup_dev(struct pkt_ctrl_command *ctrl_cmd)
2514 return 0; 2514 return 0;
2515 2515
2516out_new_dev: 2516out_new_dev:
2517 blk_put_queue(disk->queue); 2517 blk_cleanup_queue(disk->queue);
2518out_mem2: 2518out_mem2:
2519 put_disk(disk); 2519 put_disk(disk);
2520out_mem: 2520out_mem:
@@ -2555,7 +2555,7 @@ static int pkt_remove_dev(struct pkt_ctrl_command *ctrl_cmd)
2555 DPRINTK("pktcdvd: writer %s unmapped\n", pd->name); 2555 DPRINTK("pktcdvd: writer %s unmapped\n", pd->name);
2556 2556
2557 del_gendisk(pd->disk); 2557 del_gendisk(pd->disk);
2558 blk_put_queue(pd->disk->queue); 2558 blk_cleanup_queue(pd->disk->queue);
2559 put_disk(pd->disk); 2559 put_disk(pd->disk);
2560 2560
2561 pkt_devs[idx] = NULL; 2561 pkt_devs[idx] = NULL;
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 4ada1268b40d..c16e66b9c7a7 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -1131,7 +1131,7 @@ static void mm_pci_remove(struct pci_dev *dev)
1131 pci_free_consistent(card->dev, PAGE_SIZE*2, 1131 pci_free_consistent(card->dev, PAGE_SIZE*2,
1132 card->mm_pages[1].desc, 1132 card->mm_pages[1].desc,
1133 card->mm_pages[1].page_dma); 1133 card->mm_pages[1].page_dma);
1134 blk_put_queue(card->queue); 1134 blk_cleanup_queue(card->queue);
1135} 1135}
1136 1136
1137static const struct pci_device_id mm_pci_ids[] = { { 1137static const struct pci_device_id mm_pci_ids[] = { {
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 745ca1f67b14..88d60202b9db 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -840,7 +840,7 @@ static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
840 bad3: 840 bad3:
841 mempool_destroy(md->io_pool); 841 mempool_destroy(md->io_pool);
842 bad2: 842 bad2:
843 blk_put_queue(md->queue); 843 blk_cleanup_queue(md->queue);
844 free_minor(minor); 844 free_minor(minor);
845 bad1: 845 bad1:
846 kfree(md); 846 kfree(md);
@@ -860,7 +860,7 @@ static void free_dev(struct mapped_device *md)
860 del_gendisk(md->disk); 860 del_gendisk(md->disk);
861 free_minor(minor); 861 free_minor(minor);
862 put_disk(md->disk); 862 put_disk(md->disk);
863 blk_put_queue(md->queue); 863 blk_cleanup_queue(md->queue);
864 kfree(md); 864 kfree(md);
865} 865}
866 866
diff --git a/drivers/md/md.c b/drivers/md/md.c
index d05e3125d298..5ed2228745cb 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -213,8 +213,11 @@ static void mddev_put(mddev_t *mddev)
213 return; 213 return;
214 if (!mddev->raid_disks && list_empty(&mddev->disks)) { 214 if (!mddev->raid_disks && list_empty(&mddev->disks)) {
215 list_del(&mddev->all_mddevs); 215 list_del(&mddev->all_mddevs);
216 blk_put_queue(mddev->queue); 216 /* that blocks */
217 blk_cleanup_queue(mddev->queue);
218 /* that also blocks */
217 kobject_unregister(&mddev->kobj); 219 kobject_unregister(&mddev->kobj);
220 /* result blows... */
218 } 221 }
219 spin_unlock(&all_mddevs_lock); 222 spin_unlock(&all_mddevs_lock);
220} 223}
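The driver hunks above (loop, pktcdvd, umem, dm, md) all apply one pattern: teardown and error paths that used to drop the last reference with blk_put_queue() now call blk_cleanup_queue(), which marks the queue dead and shuts down the elevator before the final kobject reference is dropped. A minimal sketch of the resulting teardown shape follows; it is not part of the patch, and the "bar" driver is hypothetical.

#include <linux/genhd.h>
#include <linux/blkdev.h>

static struct gendisk *bar_disk;	/* hypothetical driver state */

static void bar_remove(void)
{
	del_gendisk(bar_disk);
	/* was blk_put_queue(bar_disk->queue) before this patch */
	blk_cleanup_queue(bar_disk->queue);
	put_disk(bar_disk);
}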
diff --git a/drivers/media/common/Makefile b/drivers/media/common/Makefile
index bd458cb9b4ea..61b89617a967 100644
--- a/drivers/media/common/Makefile
+++ b/drivers/media/common/Makefile
@@ -1,5 +1,6 @@
1saa7146-objs := saa7146_i2c.o saa7146_core.o 1saa7146-objs := saa7146_i2c.o saa7146_core.o
2saa7146_vv-objs := saa7146_vv_ksyms.o saa7146_fops.o saa7146_video.o saa7146_hlp.o saa7146_vbi.o 2saa7146_vv-objs := saa7146_vv_ksyms.o saa7146_fops.o saa7146_video.o saa7146_hlp.o saa7146_vbi.o
3ir-common-objs := ir-functions.o ir-keymaps.o
3 4
4obj-$(CONFIG_VIDEO_SAA7146) += saa7146.o 5obj-$(CONFIG_VIDEO_SAA7146) += saa7146.o
5obj-$(CONFIG_VIDEO_SAA7146_VV) += saa7146_vv.o 6obj-$(CONFIG_VIDEO_SAA7146_VV) += saa7146_vv.o
diff --git a/drivers/media/common/ir-common.c b/drivers/media/common/ir-common.c
deleted file mode 100644
index 97fa3fc571c4..000000000000
--- a/drivers/media/common/ir-common.c
+++ /dev/null
@@ -1,519 +0,0 @@
1/*
2 *
3 * some common structs and functions to handle infrared remotes via
4 * input layer ...
5 *
6 * (c) 2003 Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#include <linux/module.h>
24#include <linux/moduleparam.h>
25#include <linux/string.h>
26#include <media/ir-common.h>
27
28/* -------------------------------------------------------------------------- */
29
30MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
31MODULE_LICENSE("GPL");
32
33static int repeat = 1;
34module_param(repeat, int, 0444);
35MODULE_PARM_DESC(repeat,"auto-repeat for IR keys (default: on)");
36
37static int debug = 0; /* debug level (0,1,2) */
38module_param(debug, int, 0644);
39
40#define dprintk(level, fmt, arg...) if (debug >= level) \
41 printk(KERN_DEBUG fmt , ## arg)
42
43/* -------------------------------------------------------------------------- */
44
45/* generic RC5 keytable */
46/* see http://users.pandora.be/nenya/electronics/rc5/codes00.htm */
47/* used by old (black) Hauppauge remotes */
48IR_KEYTAB_TYPE ir_codes_rc5_tv[IR_KEYTAB_SIZE] = {
49 /* Keys 0 to 9 */
50 [ 0x00 ] = KEY_KP0,
51 [ 0x01 ] = KEY_KP1,
52 [ 0x02 ] = KEY_KP2,
53 [ 0x03 ] = KEY_KP3,
54 [ 0x04 ] = KEY_KP4,
55 [ 0x05 ] = KEY_KP5,
56 [ 0x06 ] = KEY_KP6,
57 [ 0x07 ] = KEY_KP7,
58 [ 0x08 ] = KEY_KP8,
59 [ 0x09 ] = KEY_KP9,
60
61 [ 0x0b ] = KEY_CHANNEL, /* channel / program (japan: 11) */
62 [ 0x0c ] = KEY_POWER, /* standby */
63 [ 0x0d ] = KEY_MUTE, /* mute / demute */
64 [ 0x0f ] = KEY_TV, /* display */
65 [ 0x10 ] = KEY_VOLUMEUP,
66 [ 0x11 ] = KEY_VOLUMEDOWN,
67 [ 0x12 ] = KEY_BRIGHTNESSUP,
68 [ 0x13 ] = KEY_BRIGHTNESSDOWN,
69 [ 0x1e ] = KEY_SEARCH, /* search + */
70 [ 0x20 ] = KEY_CHANNELUP, /* channel / program + */
71 [ 0x21 ] = KEY_CHANNELDOWN, /* channel / program - */
72 [ 0x22 ] = KEY_CHANNEL, /* alt / channel */
73 [ 0x23 ] = KEY_LANGUAGE, /* 1st / 2nd language */
74 [ 0x26 ] = KEY_SLEEP, /* sleeptimer */
75 [ 0x2e ] = KEY_MENU, /* 2nd controls (USA: menu) */
76 [ 0x30 ] = KEY_PAUSE,
77 [ 0x32 ] = KEY_REWIND,
78 [ 0x33 ] = KEY_GOTO,
79 [ 0x35 ] = KEY_PLAY,
80 [ 0x36 ] = KEY_STOP,
81 [ 0x37 ] = KEY_RECORD, /* recording */
82 [ 0x3c ] = KEY_TEXT, /* teletext submode (Japan: 12) */
83 [ 0x3d ] = KEY_SUSPEND, /* system standby */
84
85};
86EXPORT_SYMBOL_GPL(ir_codes_rc5_tv);
87
88/* Table for Leadtek Winfast Remote Controls - used by both bttv and cx88 */
89IR_KEYTAB_TYPE ir_codes_winfast[IR_KEYTAB_SIZE] = {
90 /* Keys 0 to 9 */
91 [ 18 ] = KEY_KP0,
92 [ 5 ] = KEY_KP1,
93 [ 6 ] = KEY_KP2,
94 [ 7 ] = KEY_KP3,
95 [ 9 ] = KEY_KP4,
96 [ 10 ] = KEY_KP5,
97 [ 11 ] = KEY_KP6,
98 [ 13 ] = KEY_KP7,
99 [ 14 ] = KEY_KP8,
100 [ 15 ] = KEY_KP9,
101
102 [ 0 ] = KEY_POWER,
103 [ 2 ] = KEY_TUNER, /* TV/FM */
104 [ 30 ] = KEY_VIDEO,
105 [ 4 ] = KEY_VOLUMEUP,
106 [ 8 ] = KEY_VOLUMEDOWN,
107 [ 12 ] = KEY_CHANNELUP,
108 [ 16 ] = KEY_CHANNELDOWN,
109 [ 3 ] = KEY_ZOOM, /* fullscreen */
110 [ 31 ] = KEY_SUBTITLE, /* closed caption/teletext */
111 [ 32 ] = KEY_SLEEP,
112 [ 20 ] = KEY_MUTE,
113 [ 43 ] = KEY_RED,
114 [ 44 ] = KEY_GREEN,
115 [ 45 ] = KEY_YELLOW,
116 [ 46 ] = KEY_BLUE,
117 [ 24 ] = KEY_KPPLUS, /* fine tune + */
118 [ 25 ] = KEY_KPMINUS, /* fine tune - */
119 [ 33 ] = KEY_KPDOT,
120 [ 19 ] = KEY_KPENTER,
121 [ 34 ] = KEY_BACK,
122 [ 35 ] = KEY_PLAYPAUSE,
123 [ 36 ] = KEY_NEXT,
124 [ 38 ] = KEY_STOP,
125 [ 39 ] = KEY_RECORD
126};
127EXPORT_SYMBOL_GPL(ir_codes_winfast);
128
129IR_KEYTAB_TYPE ir_codes_pinnacle[IR_KEYTAB_SIZE] = {
130 [ 0x59 ] = KEY_MUTE,
131 [ 0x4a ] = KEY_POWER,
132
133 [ 0x18 ] = KEY_TEXT,
134 [ 0x26 ] = KEY_TV,
135 [ 0x3d ] = KEY_PRINT,
136
137 [ 0x48 ] = KEY_RED,
138 [ 0x04 ] = KEY_GREEN,
139 [ 0x11 ] = KEY_YELLOW,
140 [ 0x00 ] = KEY_BLUE,
141
142 [ 0x2d ] = KEY_VOLUMEUP,
143 [ 0x1e ] = KEY_VOLUMEDOWN,
144
145 [ 0x49 ] = KEY_MENU,
146
147 [ 0x16 ] = KEY_CHANNELUP,
148 [ 0x17 ] = KEY_CHANNELDOWN,
149
150 [ 0x20 ] = KEY_UP,
151 [ 0x21 ] = KEY_DOWN,
152 [ 0x22 ] = KEY_LEFT,
153 [ 0x23 ] = KEY_RIGHT,
154 [ 0x0d ] = KEY_SELECT,
155
156
157
158 [ 0x08 ] = KEY_BACK,
159 [ 0x07 ] = KEY_REFRESH,
160
161 [ 0x2f ] = KEY_ZOOM,
162 [ 0x29 ] = KEY_RECORD,
163
164 [ 0x4b ] = KEY_PAUSE,
165 [ 0x4d ] = KEY_REWIND,
166 [ 0x2e ] = KEY_PLAY,
167 [ 0x4e ] = KEY_FORWARD,
168 [ 0x53 ] = KEY_PREVIOUS,
169 [ 0x4c ] = KEY_STOP,
170 [ 0x54 ] = KEY_NEXT,
171
172 [ 0x69 ] = KEY_KP0,
173 [ 0x6a ] = KEY_KP1,
174 [ 0x6b ] = KEY_KP2,
175 [ 0x6c ] = KEY_KP3,
176 [ 0x6d ] = KEY_KP4,
177 [ 0x6e ] = KEY_KP5,
178 [ 0x6f ] = KEY_KP6,
179 [ 0x70 ] = KEY_KP7,
180 [ 0x71 ] = KEY_KP8,
181 [ 0x72 ] = KEY_KP9,
182
183 [ 0x74 ] = KEY_CHANNEL,
184 [ 0x0a ] = KEY_BACKSPACE,
185};
186
187EXPORT_SYMBOL_GPL(ir_codes_pinnacle);
188
189/* empty keytable, can be used as placeholder for not-yet created keytables */
190IR_KEYTAB_TYPE ir_codes_empty[IR_KEYTAB_SIZE] = {
191 [ 42 ] = KEY_COFFEE,
192};
193EXPORT_SYMBOL_GPL(ir_codes_empty);
194
195/* Hauppauge: the newer, gray remotes (seems there are multiple
196 * slightly different versions), shipped with cx88+ivtv cards.
197 * almost rc5 coding, but some non-standard keys */
198IR_KEYTAB_TYPE ir_codes_hauppauge_new[IR_KEYTAB_SIZE] = {
199 /* Keys 0 to 9 */
200 [ 0x00 ] = KEY_KP0,
201 [ 0x01 ] = KEY_KP1,
202 [ 0x02 ] = KEY_KP2,
203 [ 0x03 ] = KEY_KP3,
204 [ 0x04 ] = KEY_KP4,
205 [ 0x05 ] = KEY_KP5,
206 [ 0x06 ] = KEY_KP6,
207 [ 0x07 ] = KEY_KP7,
208 [ 0x08 ] = KEY_KP8,
209 [ 0x09 ] = KEY_KP9,
210
211 [ 0x0a ] = KEY_TEXT, /* keypad asterisk as well */
212 [ 0x0b ] = KEY_RED, /* red button */
213 [ 0x0c ] = KEY_RADIO,
214 [ 0x0d ] = KEY_MENU,
215 [ 0x0e ] = KEY_SUBTITLE, /* also the # key */
216 [ 0x0f ] = KEY_MUTE,
217 [ 0x10 ] = KEY_VOLUMEUP,
218 [ 0x11 ] = KEY_VOLUMEDOWN,
219 [ 0x12 ] = KEY_PREVIOUS, /* previous channel */
220 [ 0x14 ] = KEY_UP,
221 [ 0x15 ] = KEY_DOWN,
222 [ 0x16 ] = KEY_LEFT,
223 [ 0x17 ] = KEY_RIGHT,
224 [ 0x18 ] = KEY_VIDEO, /* Videos */
225 [ 0x19 ] = KEY_AUDIO, /* Music */
226 /* 0x1a: Pictures - presume this means
227 "Multimedia Home Platform" -
228 no "PICTURES" key in input.h
229 */
230 [ 0x1a ] = KEY_MHP,
231
232 [ 0x1b ] = KEY_EPG, /* Guide */
233 [ 0x1c ] = KEY_TV,
234 [ 0x1e ] = KEY_NEXTSONG, /* skip >| */
235 [ 0x1f ] = KEY_EXIT, /* back/exit */
236 [ 0x20 ] = KEY_CHANNELUP, /* channel / program + */
237 [ 0x21 ] = KEY_CHANNELDOWN, /* channel / program - */
238 [ 0x22 ] = KEY_CHANNEL, /* source (old black remote) */
239 [ 0x24 ] = KEY_PREVIOUSSONG, /* replay |< */
240 [ 0x25 ] = KEY_ENTER, /* OK */
241 [ 0x26 ] = KEY_SLEEP, /* minimize (old black remote) */
242 [ 0x29 ] = KEY_BLUE, /* blue key */
243 [ 0x2e ] = KEY_GREEN, /* green button */
244 [ 0x30 ] = KEY_PAUSE, /* pause */
245 [ 0x32 ] = KEY_REWIND, /* backward << */
246 [ 0x34 ] = KEY_FASTFORWARD, /* forward >> */
247 [ 0x35 ] = KEY_PLAY,
248 [ 0x36 ] = KEY_STOP,
249 [ 0x37 ] = KEY_RECORD, /* recording */
250 [ 0x38 ] = KEY_YELLOW, /* yellow key */
251 [ 0x3b ] = KEY_SELECT, /* top right button */
252 [ 0x3c ] = KEY_ZOOM, /* full */
253 [ 0x3d ] = KEY_POWER, /* system power (green button) */
254};
255EXPORT_SYMBOL(ir_codes_hauppauge_new);
256
257IR_KEYTAB_TYPE ir_codes_pixelview[IR_KEYTAB_SIZE] = {
258 [ 2 ] = KEY_KP0,
259 [ 1 ] = KEY_KP1,
260 [ 11 ] = KEY_KP2,
261 [ 27 ] = KEY_KP3,
262 [ 5 ] = KEY_KP4,
263 [ 9 ] = KEY_KP5,
264 [ 21 ] = KEY_KP6,
265 [ 6 ] = KEY_KP7,
266 [ 10 ] = KEY_KP8,
267 [ 18 ] = KEY_KP9,
268
269 [ 3 ] = KEY_TUNER, /* TV/FM */
270 [ 7 ] = KEY_SEARCH, /* scan */
271 [ 28 ] = KEY_ZOOM, /* full screen */
272 [ 30 ] = KEY_POWER,
273 [ 23 ] = KEY_VOLUMEDOWN,
274 [ 31 ] = KEY_VOLUMEUP,
275 [ 20 ] = KEY_CHANNELDOWN,
276 [ 22 ] = KEY_CHANNELUP,
277 [ 24 ] = KEY_MUTE,
278
279 [ 0 ] = KEY_LIST, /* source */
280 [ 19 ] = KEY_INFO, /* loop */
281 [ 16 ] = KEY_LAST, /* +100 */
282 [ 13 ] = KEY_CLEAR, /* reset */
283 [ 12 ] = BTN_RIGHT, /* fun++ */
284 [ 4 ] = BTN_LEFT, /* fun-- */
285 [ 14 ] = KEY_GOTO, /* function */
286 [ 15 ] = KEY_STOP, /* freeze */
287};
288EXPORT_SYMBOL(ir_codes_pixelview);
289
290/* -------------------------------------------------------------------------- */
291
292static void ir_input_key_event(struct input_dev *dev, struct ir_input_state *ir)
293{
294 if (KEY_RESERVED == ir->keycode) {
295 printk(KERN_INFO "%s: unknown key: key=0x%02x raw=0x%02x down=%d\n",
296 dev->name,ir->ir_key,ir->ir_raw,ir->keypressed);
297 return;
298 }
299 dprintk(1,"%s: key event code=%d down=%d\n",
300 dev->name,ir->keycode,ir->keypressed);
301 input_report_key(dev,ir->keycode,ir->keypressed);
302 input_sync(dev);
303}
304
305/* -------------------------------------------------------------------------- */
306
307void ir_input_init(struct input_dev *dev, struct ir_input_state *ir,
308 int ir_type, IR_KEYTAB_TYPE *ir_codes)
309{
310 int i;
311
312 ir->ir_type = ir_type;
313 if (ir_codes)
314 memcpy(ir->ir_codes, ir_codes, sizeof(ir->ir_codes));
315
316
317 dev->keycode = ir->ir_codes;
318 dev->keycodesize = sizeof(IR_KEYTAB_TYPE);
319 dev->keycodemax = IR_KEYTAB_SIZE;
320 for (i = 0; i < IR_KEYTAB_SIZE; i++)
321 set_bit(ir->ir_codes[i], dev->keybit);
322 clear_bit(0, dev->keybit);
323
324 set_bit(EV_KEY, dev->evbit);
325 if (repeat)
326 set_bit(EV_REP, dev->evbit);
327}
328
329void ir_input_nokey(struct input_dev *dev, struct ir_input_state *ir)
330{
331 if (ir->keypressed) {
332 ir->keypressed = 0;
333 ir_input_key_event(dev,ir);
334 }
335}
336
337void ir_input_keydown(struct input_dev *dev, struct ir_input_state *ir,
338 u32 ir_key, u32 ir_raw)
339{
340 u32 keycode = IR_KEYCODE(ir->ir_codes, ir_key);
341
342 if (ir->keypressed && ir->keycode != keycode) {
343 ir->keypressed = 0;
344 ir_input_key_event(dev,ir);
345 }
346 if (!ir->keypressed) {
347 ir->ir_key = ir_key;
348 ir->ir_raw = ir_raw;
349 ir->keycode = keycode;
350 ir->keypressed = 1;
351 ir_input_key_event(dev,ir);
352 }
353}
354
355/* -------------------------------------------------------------------------- */
356
357u32 ir_extract_bits(u32 data, u32 mask)
358{
359 int mbit, vbit;
360 u32 value;
361
362 value = 0;
363 vbit = 0;
364 for (mbit = 0; mbit < 32; mbit++) {
365 if (!(mask & ((u32)1 << mbit)))
366 continue;
367 if (data & ((u32)1 << mbit))
368 value |= (1 << vbit);
369 vbit++;
370 }
371 return value;
372}
373
374static int inline getbit(u32 *samples, int bit)
375{
376 return (samples[bit/32] & (1 << (31-(bit%32)))) ? 1 : 0;
377}
378
379/* sump raw samples for visual debugging ;) */
380int ir_dump_samples(u32 *samples, int count)
381{
382 int i, bit, start;
383
384 printk(KERN_DEBUG "ir samples: ");
385 start = 0;
386 for (i = 0; i < count * 32; i++) {
387 bit = getbit(samples,i);
388 if (bit)
389 start = 1;
390 if (0 == start)
391 continue;
392 printk("%s", bit ? "#" : "_");
393 }
394 printk("\n");
395 return 0;
396}
397
398/* decode raw samples, pulse distance coding used by NEC remotes */
399int ir_decode_pulsedistance(u32 *samples, int count, int low, int high)
400{
401 int i,last,bit,len;
402 u32 curBit;
403 u32 value;
404
405 /* find start burst */
406 for (i = len = 0; i < count * 32; i++) {
407 bit = getbit(samples,i);
408 if (bit) {
409 len++;
410 } else {
411 if (len >= 29)
412 break;
413 len = 0;
414 }
415 }
416
417 /* start burst to short */
418 if (len < 29)
419 return 0xffffffff;
420
421 /* find start silence */
422 for (len = 0; i < count * 32; i++) {
423 bit = getbit(samples,i);
424 if (bit) {
425 break;
426 } else {
427 len++;
428 }
429 }
430
431 /* silence to short */
432 if (len < 7)
433 return 0xffffffff;
434
435 /* go decoding */
436 len = 0;
437 last = 1;
438 value = 0; curBit = 1;
439 for (; i < count * 32; i++) {
440 bit = getbit(samples,i);
441 if (last) {
442 if(bit) {
443 continue;
444 } else {
445 len = 1;
446 }
447 } else {
448 if (bit) {
449 if (len > (low + high) /2)
450 value |= curBit;
451 curBit <<= 1;
452 if (curBit == 1)
453 break;
454 } else {
455 len++;
456 }
457 }
458 last = bit;
459 }
460
461 return value;
462}
463
464/* decode raw samples, biphase coding, used by rc5 for example */
465int ir_decode_biphase(u32 *samples, int count, int low, int high)
466{
467 int i,last,bit,len,flips;
468 u32 value;
469
470 /* find start bit (1) */
471 for (i = 0; i < 32; i++) {
472 bit = getbit(samples,i);
473 if (bit)
474 break;
475 }
476
477 /* go decoding */
478 len = 0;
479 flips = 0;
480 value = 1;
481 for (; i < count * 32; i++) {
482 if (len > high)
483 break;
484 if (flips > 1)
485 break;
486 last = bit;
487 bit = getbit(samples,i);
488 if (last == bit) {
489 len++;
490 continue;
491 }
492 if (len < low) {
493 len++;
494 flips++;
495 continue;
496 }
497 value <<= 1;
498 value |= bit;
499 flips = 0;
500 len = 1;
501 }
502 return value;
503}
504
505EXPORT_SYMBOL_GPL(ir_input_init);
506EXPORT_SYMBOL_GPL(ir_input_nokey);
507EXPORT_SYMBOL_GPL(ir_input_keydown);
508
509EXPORT_SYMBOL_GPL(ir_extract_bits);
510EXPORT_SYMBOL_GPL(ir_dump_samples);
511EXPORT_SYMBOL_GPL(ir_decode_biphase);
512EXPORT_SYMBOL_GPL(ir_decode_pulsedistance);
513
514/*
515 * Local variables:
516 * c-basic-offset: 8
517 * End:
518 */
519
diff --git a/drivers/media/common/ir-functions.c b/drivers/media/common/ir-functions.c
new file mode 100644
index 000000000000..397cff8b345b
--- /dev/null
+++ b/drivers/media/common/ir-functions.c
@@ -0,0 +1,272 @@
1/*
2 *
3 * some common structs and functions to handle infrared remotes via
4 * input layer ...
5 *
6 * (c) 2003 Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#include <linux/module.h>
24#include <linux/moduleparam.h>
25#include <linux/string.h>
26#include <media/ir-common.h>
27
28/* -------------------------------------------------------------------------- */
29
30MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
31MODULE_LICENSE("GPL");
32
33static int repeat = 1;
34module_param(repeat, int, 0444);
35MODULE_PARM_DESC(repeat,"auto-repeat for IR keys (default: on)");
36
37static int debug = 0; /* debug level (0,1,2) */
38module_param(debug, int, 0644);
39
40#define dprintk(level, fmt, arg...) if (debug >= level) \
41 printk(KERN_DEBUG fmt , ## arg)
42
43/* -------------------------------------------------------------------------- */
44
45static void ir_input_key_event(struct input_dev *dev, struct ir_input_state *ir)
46{
47 if (KEY_RESERVED == ir->keycode) {
48 printk(KERN_INFO "%s: unknown key: key=0x%02x raw=0x%02x down=%d\n",
49 dev->name,ir->ir_key,ir->ir_raw,ir->keypressed);
50 return;
51 }
52 dprintk(1,"%s: key event code=%d down=%d\n",
53 dev->name,ir->keycode,ir->keypressed);
54 input_report_key(dev,ir->keycode,ir->keypressed);
55 input_sync(dev);
56}
57
58/* -------------------------------------------------------------------------- */
59
60void ir_input_init(struct input_dev *dev, struct ir_input_state *ir,
61 int ir_type, IR_KEYTAB_TYPE *ir_codes)
62{
63 int i;
64
65 ir->ir_type = ir_type;
66 if (ir_codes)
67 memcpy(ir->ir_codes, ir_codes, sizeof(ir->ir_codes));
68
69
70 dev->keycode = ir->ir_codes;
71 dev->keycodesize = sizeof(IR_KEYTAB_TYPE);
72 dev->keycodemax = IR_KEYTAB_SIZE;
73 for (i = 0; i < IR_KEYTAB_SIZE; i++)
74 set_bit(ir->ir_codes[i], dev->keybit);
75 clear_bit(0, dev->keybit);
76
77 set_bit(EV_KEY, dev->evbit);
78 if (repeat)
79 set_bit(EV_REP, dev->evbit);
80}
81
82void ir_input_nokey(struct input_dev *dev, struct ir_input_state *ir)
83{
84 if (ir->keypressed) {
85 ir->keypressed = 0;
86 ir_input_key_event(dev,ir);
87 }
88}
89
90void ir_input_keydown(struct input_dev *dev, struct ir_input_state *ir,
91 u32 ir_key, u32 ir_raw)
92{
93 u32 keycode = IR_KEYCODE(ir->ir_codes, ir_key);
94
95 if (ir->keypressed && ir->keycode != keycode) {
96 ir->keypressed = 0;
97 ir_input_key_event(dev,ir);
98 }
99 if (!ir->keypressed) {
100 ir->ir_key = ir_key;
101 ir->ir_raw = ir_raw;
102 ir->keycode = keycode;
103 ir->keypressed = 1;
104 ir_input_key_event(dev,ir);
105 }
106}
107
108/* -------------------------------------------------------------------------- */
109
110u32 ir_extract_bits(u32 data, u32 mask)
111{
112 int mbit, vbit;
113 u32 value;
114
115 value = 0;
116 vbit = 0;
117 for (mbit = 0; mbit < 32; mbit++) {
118 if (!(mask & ((u32)1 << mbit)))
119 continue;
120 if (data & ((u32)1 << mbit))
121 value |= (1 << vbit);
122 vbit++;
123 }
124 return value;
125}
126
127static int inline getbit(u32 *samples, int bit)
128{
129 return (samples[bit/32] & (1 << (31-(bit%32)))) ? 1 : 0;
130}
131
132/* dump raw samples for visual debugging ;) */
133int ir_dump_samples(u32 *samples, int count)
134{
135 int i, bit, start;
136
137 printk(KERN_DEBUG "ir samples: ");
138 start = 0;
139 for (i = 0; i < count * 32; i++) {
140 bit = getbit(samples,i);
141 if (bit)
142 start = 1;
143 if (0 == start)
144 continue;
145 printk("%s", bit ? "#" : "_");
146 }
147 printk("\n");
148 return 0;
149}
150
151/* decode raw samples, pulse distance coding used by NEC remotes */
152int ir_decode_pulsedistance(u32 *samples, int count, int low, int high)
153{
154 int i,last,bit,len;
155 u32 curBit;
156 u32 value;
157
158 /* find start burst */
159 for (i = len = 0; i < count * 32; i++) {
160 bit = getbit(samples,i);
161 if (bit) {
162 len++;
163 } else {
164 if (len >= 29)
165 break;
166 len = 0;
167 }
168 }
169
170	/* start burst too short */
171 if (len < 29)
172 return 0xffffffff;
173
174 /* find start silence */
175 for (len = 0; i < count * 32; i++) {
176 bit = getbit(samples,i);
177 if (bit) {
178 break;
179 } else {
180 len++;
181 }
182 }
183
184	/* silence too short */
185 if (len < 7)
186 return 0xffffffff;
187
188 /* go decoding */
189 len = 0;
190 last = 1;
191 value = 0; curBit = 1;
192 for (; i < count * 32; i++) {
193 bit = getbit(samples,i);
194 if (last) {
195 if(bit) {
196 continue;
197 } else {
198 len = 1;
199 }
200 } else {
201 if (bit) {
202 if (len > (low + high) /2)
203 value |= curBit;
204 curBit <<= 1;
205 if (curBit == 1)
206 break;
207 } else {
208 len++;
209 }
210 }
211 last = bit;
212 }
213
214 return value;
215}
216
217/* decode raw samples, biphase coding, used by rc5 for example */
218int ir_decode_biphase(u32 *samples, int count, int low, int high)
219{
220 int i,last,bit,len,flips;
221 u32 value;
222
223 /* find start bit (1) */
224 for (i = 0; i < 32; i++) {
225 bit = getbit(samples,i);
226 if (bit)
227 break;
228 }
229
230 /* go decoding */
231 len = 0;
232 flips = 0;
233 value = 1;
234 for (; i < count * 32; i++) {
235 if (len > high)
236 break;
237 if (flips > 1)
238 break;
239 last = bit;
240 bit = getbit(samples,i);
241 if (last == bit) {
242 len++;
243 continue;
244 }
245 if (len < low) {
246 len++;
247 flips++;
248 continue;
249 }
250 value <<= 1;
251 value |= bit;
252 flips = 0;
253 len = 1;
254 }
255 return value;
256}
257
258EXPORT_SYMBOL_GPL(ir_input_init);
259EXPORT_SYMBOL_GPL(ir_input_nokey);
260EXPORT_SYMBOL_GPL(ir_input_keydown);
261
262EXPORT_SYMBOL_GPL(ir_extract_bits);
263EXPORT_SYMBOL_GPL(ir_dump_samples);
264EXPORT_SYMBOL_GPL(ir_decode_biphase);
265EXPORT_SYMBOL_GPL(ir_decode_pulsedistance);
266
267/*
268 * Local variables:
269 * c-basic-offset: 8
270 * End:
271 */
272
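For illustration, a minimal sketch of how a card driver is expected to use the helpers kept in ir-functions.c together with a keytable exported from ir-keymaps.c; it is not part of the patch. ir_input_init(), ir_input_keydown(), ir_input_nokey(), ir_extract_bits() and ir_codes_empty come from the files in this diff; the "mycard" driver, its GPIO mask and the IR_TYPE_OTHER constant assumed to live in <media/ir-common.h> are this sketch's own assumptions.

#include <linux/errno.h>
#include <linux/input.h>
#include <media/ir-common.h>

static struct input_dev *mycard_input;		/* hypothetical card driver */
static struct ir_input_state mycard_ir;

static int mycard_ir_register(void)
{
	mycard_input = input_allocate_device();
	if (!mycard_input)
		return -ENOMEM;

	/* wire the keytable and key bits into the input device */
	ir_input_init(mycard_input, &mycard_ir, IR_TYPE_OTHER, ir_codes_empty);
	mycard_input->name = "mycard IR (sketch)";
	input_register_device(mycard_input);
	return 0;
}

/* called from the driver's GPIO poll routine */
static void mycard_ir_poll(u32 gpio, int key_down)
{
	/* the mask is card specific; 0x1f is a placeholder */
	u32 scancode = ir_extract_bits(gpio, 0x1f);

	if (key_down)
		ir_input_keydown(mycard_input, &mycard_ir, scancode, gpio);
	else
		ir_input_nokey(mycard_input, &mycard_ir);
}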
diff --git a/drivers/media/common/ir-keymaps.c b/drivers/media/common/ir-keymaps.c
new file mode 100644
index 000000000000..a294d5c2c73f
--- /dev/null
+++ b/drivers/media/common/ir-keymaps.c
@@ -0,0 +1,1415 @@
1/*
2
3
4 Keytables for supported remote controls. This file is part of
5 video4linux.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20
21 */
22#include <linux/module.h>
23#include <linux/moduleparam.h>
24
25#include <linux/input.h>
26#include <media/ir-common.h>
27
28/* empty keytable, can be used as placeholder for not-yet created keytables */
29IR_KEYTAB_TYPE ir_codes_empty[IR_KEYTAB_SIZE] = {
30 [ 0x2a ] = KEY_COFFEE,
31};
32
33EXPORT_SYMBOL_GPL(ir_codes_empty);
34
35/* Matt Jesson <dvb@jesson.eclipse.co.uk */
36IR_KEYTAB_TYPE ir_codes_avermedia_dvbt[IR_KEYTAB_SIZE] = {
37 [ 0x28 ] = KEY_0, //'0' / 'enter'
38 [ 0x22 ] = KEY_1, //'1'
39 [ 0x12 ] = KEY_2, //'2' / 'up arrow'
40 [ 0x32 ] = KEY_3, //'3'
41 [ 0x24 ] = KEY_4, //'4' / 'left arrow'
42 [ 0x14 ] = KEY_5, //'5'
43 [ 0x34 ] = KEY_6, //'6' / 'right arrow'
44 [ 0x26 ] = KEY_7, //'7'
45 [ 0x16 ] = KEY_8, //'8' / 'down arrow'
46 [ 0x36 ] = KEY_9, //'9'
47
48 [ 0x20 ] = KEY_LIST, // 'source'
49 [ 0x10 ] = KEY_TEXT, // 'teletext'
50 [ 0x00 ] = KEY_POWER, // 'power'
51 [ 0x04 ] = KEY_AUDIO, // 'audio'
52 [ 0x06 ] = KEY_ZOOM, // 'full screen'
53 [ 0x18 ] = KEY_VIDEO, // 'display'
54 [ 0x38 ] = KEY_SEARCH, // 'loop'
55 [ 0x08 ] = KEY_INFO, // 'preview'
56 [ 0x2a ] = KEY_REWIND, // 'backward <<'
57 [ 0x1a ] = KEY_FASTFORWARD, // 'forward >>'
58 [ 0x3a ] = KEY_RECORD, // 'capture'
59 [ 0x0a ] = KEY_MUTE, // 'mute'
60 [ 0x2c ] = KEY_RECORD, // 'record'
61 [ 0x1c ] = KEY_PAUSE, // 'pause'
62 [ 0x3c ] = KEY_STOP, // 'stop'
63 [ 0x0c ] = KEY_PLAY, // 'play'
64 [ 0x2e ] = KEY_RED, // 'red'
65 [ 0x01 ] = KEY_BLUE, // 'blue' / 'cancel'
66 [ 0x0e ] = KEY_YELLOW, // 'yellow' / 'ok'
67 [ 0x21 ] = KEY_GREEN, // 'green'
68 [ 0x11 ] = KEY_CHANNELDOWN, // 'channel -'
69 [ 0x31 ] = KEY_CHANNELUP, // 'channel +'
70 [ 0x1e ] = KEY_VOLUMEDOWN, // 'volume -'
71 [ 0x3e ] = KEY_VOLUMEUP, // 'volume +'
72};
73
74EXPORT_SYMBOL_GPL(ir_codes_avermedia_dvbt);
75
76/* Attila Kondoros <attila.kondoros@chello.hu> */
77IR_KEYTAB_TYPE ir_codes_apac_viewcomp[IR_KEYTAB_SIZE] = {
78
79 [ 0x01 ] = KEY_1,
80 [ 0x02 ] = KEY_2,
81 [ 0x03 ] = KEY_3,
82 [ 0x04 ] = KEY_4,
83 [ 0x05 ] = KEY_5,
84 [ 0x06 ] = KEY_6,
85 [ 0x07 ] = KEY_7,
86 [ 0x08 ] = KEY_8,
87 [ 0x09 ] = KEY_9,
88 [ 0x00 ] = KEY_0,
89 [ 0x17 ] = KEY_LAST, // +100
90 [ 0x0a ] = KEY_LIST, // recall
91
92
93 [ 0x1c ] = KEY_TUNER, // TV/FM
94 [ 0x15 ] = KEY_SEARCH, // scan
95 [ 0x12 ] = KEY_POWER, // power
96 [ 0x1f ] = KEY_VOLUMEDOWN, // vol up
97 [ 0x1b ] = KEY_VOLUMEUP, // vol down
98 [ 0x1e ] = KEY_CHANNELDOWN, // chn up
99 [ 0x1a ] = KEY_CHANNELUP, // chn down
100
101 [ 0x11 ] = KEY_VIDEO, // video
102 [ 0x0f ] = KEY_ZOOM, // full screen
103 [ 0x13 ] = KEY_MUTE, // mute/unmute
104 [ 0x10 ] = KEY_TEXT, // min
105
106 [ 0x0d ] = KEY_STOP, // freeze
107 [ 0x0e ] = KEY_RECORD, // record
108 [ 0x1d ] = KEY_PLAYPAUSE, // stop
109 [ 0x19 ] = KEY_PLAY, // play
110
111 [ 0x16 ] = KEY_GOTO, // osd
112 [ 0x14 ] = KEY_REFRESH, // default
113 [ 0x0c ] = KEY_KPPLUS, // fine tune >>>>
114 [ 0x18 ] = KEY_KPMINUS // fine tune <<<<
115};
116
117EXPORT_SYMBOL_GPL(ir_codes_apac_viewcomp);
118
119/* ---------------------------------------------------------------------- */
120
121IR_KEYTAB_TYPE ir_codes_pixelview[IR_KEYTAB_SIZE] = {
122
123 [ 0x1e ] = KEY_POWER, // power
124 [ 0x07 ] = KEY_MEDIA, // source
125 [ 0x1c ] = KEY_SEARCH, // scan
126
127/* FIXME: duplicate keycodes?
128 *
129 * These four keys seem to share the same GPIO as CH+, CH-, <<< and >>>
130 * The GPIO values are
131 * 6397fb for both "Scan <" and "CH -",
132 * 639ffb for "Scan >" and "CH+",
133 * 6384fb for "Tune <" and "<<<",
134 * 638cfb for "Tune >" and ">>>", regardless of the mask.
135 *
136 * [ 0x17 ] = KEY_BACK, // fm scan <<
137 * [ 0x1f ] = KEY_FORWARD, // fm scan >>
138 *
139 * [ 0x04 ] = KEY_LEFT, // fm tuning <
140 * [ 0x0c ] = KEY_RIGHT, // fm tuning >
141 *
142 * For now, these four keys are disabled. Pressing them will generate
143 * the CH+/CH-/<<</>>> events
144 */
145
146 [ 0x03 ] = KEY_TUNER, // TV/FM
147
148 [ 0x00 ] = KEY_RECORD,
149 [ 0x08 ] = KEY_STOP,
150 [ 0x11 ] = KEY_PLAY,
151
152 [ 0x1a ] = KEY_PLAYPAUSE, // freeze
153 [ 0x19 ] = KEY_ZOOM, // zoom
154 [ 0x0f ] = KEY_TEXT, // min
155
156 [ 0x01 ] = KEY_1,
157 [ 0x0b ] = KEY_2,
158 [ 0x1b ] = KEY_3,
159 [ 0x05 ] = KEY_4,
160 [ 0x09 ] = KEY_5,
161 [ 0x15 ] = KEY_6,
162 [ 0x06 ] = KEY_7,
163 [ 0x0a ] = KEY_8,
164 [ 0x12 ] = KEY_9,
165 [ 0x02 ] = KEY_0,
166 [ 0x10 ] = KEY_LAST, // +100
167 [ 0x13 ] = KEY_LIST, // recall
168
169 [ 0x1f ] = KEY_CHANNELUP, // chn down
170 [ 0x17 ] = KEY_CHANNELDOWN, // chn up
171 [ 0x16 ] = KEY_VOLUMEUP, // vol down
172 [ 0x14 ] = KEY_VOLUMEDOWN, // vol up
173
174 [ 0x04 ] = KEY_KPMINUS, // <<<
175 [ 0x0e ] = KEY_SETUP, // function
176 [ 0x0c ] = KEY_KPPLUS, // >>>
177
178 [ 0x0d ] = KEY_GOTO, // mts
179 [ 0x1d ] = KEY_REFRESH, // reset
180 [ 0x18 ] = KEY_MUTE // mute/unmute
181};
182
183EXPORT_SYMBOL_GPL(ir_codes_pixelview);
184
185IR_KEYTAB_TYPE ir_codes_nebula[IR_KEYTAB_SIZE] = {
186 [ 0x00 ] = KEY_0,
187 [ 0x01 ] = KEY_1,
188 [ 0x02 ] = KEY_2,
189 [ 0x03 ] = KEY_3,
190 [ 0x04 ] = KEY_4,
191 [ 0x05 ] = KEY_5,
192 [ 0x06 ] = KEY_6,
193 [ 0x07 ] = KEY_7,
194 [ 0x08 ] = KEY_8,
195 [ 0x09 ] = KEY_9,
196 [ 0x0a ] = KEY_TV,
197 [ 0x0b ] = KEY_AUX,
198 [ 0x0c ] = KEY_DVD,
199 [ 0x0d ] = KEY_POWER,
200 [ 0x0e ] = KEY_MHP, /* labelled 'Picture' */
201 [ 0x0f ] = KEY_AUDIO,
202 [ 0x10 ] = KEY_INFO,
203 [ 0x11 ] = KEY_F13, /* 16:9 */
204 [ 0x12 ] = KEY_F14, /* 14:9 */
205 [ 0x13 ] = KEY_EPG,
206 [ 0x14 ] = KEY_EXIT,
207 [ 0x15 ] = KEY_MENU,
208 [ 0x16 ] = KEY_UP,
209 [ 0x17 ] = KEY_DOWN,
210 [ 0x18 ] = KEY_LEFT,
211 [ 0x19 ] = KEY_RIGHT,
212 [ 0x1a ] = KEY_ENTER,
213 [ 0x1b ] = KEY_CHANNELUP,
214 [ 0x1c ] = KEY_CHANNELDOWN,
215 [ 0x1d ] = KEY_VOLUMEUP,
216 [ 0x1e ] = KEY_VOLUMEDOWN,
217 [ 0x1f ] = KEY_RED,
218 [ 0x20 ] = KEY_GREEN,
219 [ 0x21 ] = KEY_YELLOW,
220 [ 0x22 ] = KEY_BLUE,
221 [ 0x23 ] = KEY_SUBTITLE,
222 [ 0x24 ] = KEY_F15, /* AD */
223 [ 0x25 ] = KEY_TEXT,
224 [ 0x26 ] = KEY_MUTE,
225 [ 0x27 ] = KEY_REWIND,
226 [ 0x28 ] = KEY_STOP,
227 [ 0x29 ] = KEY_PLAY,
228 [ 0x2a ] = KEY_FASTFORWARD,
229 [ 0x2b ] = KEY_F16, /* chapter */
230 [ 0x2c ] = KEY_PAUSE,
231 [ 0x2d ] = KEY_PLAY,
232 [ 0x2e ] = KEY_RECORD,
233 [ 0x2f ] = KEY_F17, /* picture in picture */
234 [ 0x30 ] = KEY_KPPLUS, /* zoom in */
235 [ 0x31 ] = KEY_KPMINUS, /* zoom out */
236 [ 0x32 ] = KEY_F18, /* capture */
237 [ 0x33 ] = KEY_F19, /* web */
238 [ 0x34 ] = KEY_EMAIL,
239 [ 0x35 ] = KEY_PHONE,
240 [ 0x36 ] = KEY_PC
241};
242
243EXPORT_SYMBOL_GPL(ir_codes_nebula);
244
245/* DigitalNow DNTV Live DVB-T Remote */
246IR_KEYTAB_TYPE ir_codes_dntv_live_dvb_t[IR_KEYTAB_SIZE] = {
247 [ 0x00 ] = KEY_ESC, /* 'go up a level?' */
248 /* Keys 0 to 9 */
249 [ 0x0a ] = KEY_0,
250 [ 0x01 ] = KEY_1,
251 [ 0x02 ] = KEY_2,
252 [ 0x03 ] = KEY_3,
253 [ 0x04 ] = KEY_4,
254 [ 0x05 ] = KEY_5,
255 [ 0x06 ] = KEY_6,
256 [ 0x07 ] = KEY_7,
257 [ 0x08 ] = KEY_8,
258 [ 0x09 ] = KEY_9,
259
260 [ 0x0b ] = KEY_TUNER, /* tv/fm */
261 [ 0x0c ] = KEY_SEARCH, /* scan */
262 [ 0x0d ] = KEY_STOP,
263 [ 0x0e ] = KEY_PAUSE,
264 [ 0x0f ] = KEY_LIST, /* source */
265
266 [ 0x10 ] = KEY_MUTE,
267 [ 0x11 ] = KEY_REWIND, /* backward << */
268 [ 0x12 ] = KEY_POWER,
269 [ 0x13 ] = KEY_S, /* snap */
270 [ 0x14 ] = KEY_AUDIO, /* stereo */
271 [ 0x15 ] = KEY_CLEAR, /* reset */
272 [ 0x16 ] = KEY_PLAY,
273 [ 0x17 ] = KEY_ENTER,
274 [ 0x18 ] = KEY_ZOOM, /* full screen */
275 [ 0x19 ] = KEY_FASTFORWARD, /* forward >> */
276 [ 0x1a ] = KEY_CHANNELUP,
277 [ 0x1b ] = KEY_VOLUMEUP,
278 [ 0x1c ] = KEY_INFO, /* preview */
279 [ 0x1d ] = KEY_RECORD, /* record */
280 [ 0x1e ] = KEY_CHANNELDOWN,
281 [ 0x1f ] = KEY_VOLUMEDOWN,
282};
283
284EXPORT_SYMBOL_GPL(ir_codes_dntv_live_dvb_t);
285
286/* ---------------------------------------------------------------------- */
287
288/* IO-DATA BCTV7E Remote */
289IR_KEYTAB_TYPE ir_codes_iodata_bctv7e[IR_KEYTAB_SIZE] = {
290 [ 0x40 ] = KEY_TV,
291 [ 0x20 ] = KEY_RADIO, /* FM */
292 [ 0x60 ] = KEY_EPG,
293 [ 0x00 ] = KEY_POWER,
294
295 /* Keys 0 to 9 */
296 [ 0x44 ] = KEY_0, /* 10 */
297 [ 0x50 ] = KEY_1,
298 [ 0x30 ] = KEY_2,
299 [ 0x70 ] = KEY_3,
300 [ 0x48 ] = KEY_4,
301 [ 0x28 ] = KEY_5,
302 [ 0x68 ] = KEY_6,
303 [ 0x58 ] = KEY_7,
304 [ 0x38 ] = KEY_8,
305 [ 0x78 ] = KEY_9,
306
307 [ 0x10 ] = KEY_L, /* Live */
308 [ 0x08 ] = KEY_T, /* Time Shift */
309
310 [ 0x18 ] = KEY_PLAYPAUSE, /* Play */
311
312 [ 0x24 ] = KEY_ENTER, /* 11 */
313 [ 0x64 ] = KEY_ESC, /* 12 */
314 [ 0x04 ] = KEY_M, /* Multi */
315
316 [ 0x54 ] = KEY_VIDEO,
317 [ 0x34 ] = KEY_CHANNELUP,
318 [ 0x74 ] = KEY_VOLUMEUP,
319 [ 0x14 ] = KEY_MUTE,
320
321 [ 0x4c ] = KEY_S, /* SVIDEO */
322 [ 0x2c ] = KEY_CHANNELDOWN,
323 [ 0x6c ] = KEY_VOLUMEDOWN,
324 [ 0x0c ] = KEY_ZOOM,
325
326 [ 0x5c ] = KEY_PAUSE,
327 [ 0x3c ] = KEY_C, /* || (red) */
328 [ 0x7c ] = KEY_RECORD, /* recording */
329 [ 0x1c ] = KEY_STOP,
330
331 [ 0x41 ] = KEY_REWIND, /* backward << */
332 [ 0x21 ] = KEY_PLAY,
333 [ 0x61 ] = KEY_FASTFORWARD, /* forward >> */
334 [ 0x01 ] = KEY_NEXT, /* skip >| */
335};
336
337EXPORT_SYMBOL_GPL(ir_codes_iodata_bctv7e);
338
339/* ---------------------------------------------------------------------- */
340
341/* ADS Tech Instant TV DVB-T PCI Remote */
342IR_KEYTAB_TYPE ir_codes_adstech_dvb_t_pci[IR_KEYTAB_SIZE] = {
343 /* Keys 0 to 9 */
344 [ 0x4d ] = KEY_0,
345 [ 0x57 ] = KEY_1,
346 [ 0x4f ] = KEY_2,
347 [ 0x53 ] = KEY_3,
348 [ 0x56 ] = KEY_4,
349 [ 0x4e ] = KEY_5,
350 [ 0x5e ] = KEY_6,
351 [ 0x54 ] = KEY_7,
352 [ 0x4c ] = KEY_8,
353 [ 0x5c ] = KEY_9,
354
355 [ 0x5b ] = KEY_POWER,
356 [ 0x5f ] = KEY_MUTE,
357 [ 0x55 ] = KEY_GOTO,
358 [ 0x5d ] = KEY_SEARCH,
359 [ 0x17 ] = KEY_EPG, /* Guide */
360 [ 0x1f ] = KEY_MENU,
361 [ 0x0f ] = KEY_UP,
362 [ 0x46 ] = KEY_DOWN,
363 [ 0x16 ] = KEY_LEFT,
364 [ 0x1e ] = KEY_RIGHT,
365 [ 0x0e ] = KEY_SELECT, /* Enter */
366 [ 0x5a ] = KEY_INFO,
367 [ 0x52 ] = KEY_EXIT,
368 [ 0x59 ] = KEY_PREVIOUS,
369 [ 0x51 ] = KEY_NEXT,
370 [ 0x58 ] = KEY_REWIND,
371 [ 0x50 ] = KEY_FORWARD,
372 [ 0x44 ] = KEY_PLAYPAUSE,
373 [ 0x07 ] = KEY_STOP,
374 [ 0x1b ] = KEY_RECORD,
375 [ 0x13 ] = KEY_TUNER, /* Live */
376 [ 0x0a ] = KEY_A,
377 [ 0x12 ] = KEY_B,
378 [ 0x03 ] = KEY_PROG1, /* 1 */
379 [ 0x01 ] = KEY_PROG2, /* 2 */
380 [ 0x00 ] = KEY_PROG3, /* 3 */
381 [ 0x06 ] = KEY_DVD,
382 [ 0x48 ] = KEY_AUX, /* Photo */
383 [ 0x40 ] = KEY_VIDEO,
384 [ 0x19 ] = KEY_AUDIO, /* Music */
385 [ 0x0b ] = KEY_CHANNELUP,
386 [ 0x08 ] = KEY_CHANNELDOWN,
387 [ 0x15 ] = KEY_VOLUMEUP,
388 [ 0x1c ] = KEY_VOLUMEDOWN,
389};
390
391EXPORT_SYMBOL_GPL(ir_codes_adstech_dvb_t_pci);
392
393/* ---------------------------------------------------------------------- */
394
395/* MSI TV@nywhere remote */
396IR_KEYTAB_TYPE ir_codes_msi_tvanywhere[IR_KEYTAB_SIZE] = {
397 /* Keys 0 to 9 */
398 [ 0x00 ] = KEY_0,
399 [ 0x01 ] = KEY_1,
400 [ 0x02 ] = KEY_2,
401 [ 0x03 ] = KEY_3,
402 [ 0x04 ] = KEY_4,
403 [ 0x05 ] = KEY_5,
404 [ 0x06 ] = KEY_6,
405 [ 0x07 ] = KEY_7,
406 [ 0x08 ] = KEY_8,
407 [ 0x09 ] = KEY_9,
408
409 [ 0x0c ] = KEY_MUTE,
410 [ 0x0f ] = KEY_SCREEN, /* Full Screen */
411	[ 0x10 ] = KEY_F,		/* Function */
412 [ 0x11 ] = KEY_T, /* Time shift */
413 [ 0x12 ] = KEY_POWER,
414 [ 0x13 ] = KEY_MEDIA, /* MTS */
415 [ 0x14 ] = KEY_SLOW,
416 [ 0x16 ] = KEY_REWIND, /* backward << */
417 [ 0x17 ] = KEY_ENTER, /* Return */
418 [ 0x18 ] = KEY_FASTFORWARD, /* forward >> */
419 [ 0x1a ] = KEY_CHANNELUP,
420 [ 0x1b ] = KEY_VOLUMEUP,
421 [ 0x1e ] = KEY_CHANNELDOWN,
422 [ 0x1f ] = KEY_VOLUMEDOWN,
423};
424
425EXPORT_SYMBOL_GPL(ir_codes_msi_tvanywhere);
426
427/* ---------------------------------------------------------------------- */
428
429/* Cinergy 1400 DVB-T */
430IR_KEYTAB_TYPE ir_codes_cinergy_1400[IR_KEYTAB_SIZE] = {
431 [ 0x01 ] = KEY_POWER,
432 [ 0x02 ] = KEY_1,
433 [ 0x03 ] = KEY_2,
434 [ 0x04 ] = KEY_3,
435 [ 0x05 ] = KEY_4,
436 [ 0x06 ] = KEY_5,
437 [ 0x07 ] = KEY_6,
438 [ 0x08 ] = KEY_7,
439 [ 0x09 ] = KEY_8,
440 [ 0x0a ] = KEY_9,
441 [ 0x0c ] = KEY_0,
442
443 [ 0x0b ] = KEY_VIDEO,
444 [ 0x0d ] = KEY_REFRESH,
445 [ 0x0e ] = KEY_SELECT,
446 [ 0x0f ] = KEY_EPG,
447 [ 0x10 ] = KEY_UP,
448 [ 0x11 ] = KEY_LEFT,
449 [ 0x12 ] = KEY_OK,
450 [ 0x13 ] = KEY_RIGHT,
451 [ 0x14 ] = KEY_DOWN,
452 [ 0x15 ] = KEY_TEXT,
453 [ 0x16 ] = KEY_INFO,
454
455 [ 0x17 ] = KEY_RED,
456 [ 0x18 ] = KEY_GREEN,
457 [ 0x19 ] = KEY_YELLOW,
458 [ 0x1a ] = KEY_BLUE,
459
460 [ 0x1b ] = KEY_CHANNELUP,
461 [ 0x1c ] = KEY_VOLUMEUP,
462 [ 0x1d ] = KEY_MUTE,
463 [ 0x1e ] = KEY_VOLUMEDOWN,
464 [ 0x1f ] = KEY_CHANNELDOWN,
465
466 [ 0x40 ] = KEY_PAUSE,
467 [ 0x4c ] = KEY_PLAY,
468 [ 0x58 ] = KEY_RECORD,
469 [ 0x54 ] = KEY_PREVIOUS,
470 [ 0x48 ] = KEY_STOP,
471 [ 0x5c ] = KEY_NEXT,
472};
473
474EXPORT_SYMBOL_GPL(ir_codes_cinergy_1400);
475
476/* ---------------------------------------------------------------------- */
477
478/* AVERTV STUDIO 303 Remote */
479IR_KEYTAB_TYPE ir_codes_avertv_303[IR_KEYTAB_SIZE] = {
480 [ 0x2a ] = KEY_1,
481 [ 0x32 ] = KEY_2,
482 [ 0x3a ] = KEY_3,
483 [ 0x4a ] = KEY_4,
484 [ 0x52 ] = KEY_5,
485 [ 0x5a ] = KEY_6,
486 [ 0x6a ] = KEY_7,
487 [ 0x72 ] = KEY_8,
488 [ 0x7a ] = KEY_9,
489 [ 0x0e ] = KEY_0,
490
491 [ 0x02 ] = KEY_POWER,
492 [ 0x22 ] = KEY_VIDEO,
493 [ 0x42 ] = KEY_AUDIO,
494 [ 0x62 ] = KEY_ZOOM,
495 [ 0x0a ] = KEY_TV,
496 [ 0x12 ] = KEY_CD,
497 [ 0x1a ] = KEY_TEXT,
498
499 [ 0x16 ] = KEY_SUBTITLE,
500 [ 0x1e ] = KEY_REWIND,
501 [ 0x06 ] = KEY_PRINT,
502
503 [ 0x2e ] = KEY_SEARCH,
504 [ 0x36 ] = KEY_SLEEP,
505 [ 0x3e ] = KEY_SHUFFLE,
506 [ 0x26 ] = KEY_MUTE,
507
508 [ 0x4e ] = KEY_RECORD,
509 [ 0x56 ] = KEY_PAUSE,
510 [ 0x5e ] = KEY_STOP,
511 [ 0x46 ] = KEY_PLAY,
512
513 [ 0x6e ] = KEY_RED,
514 [ 0x0b ] = KEY_GREEN,
515 [ 0x66 ] = KEY_YELLOW,
516 [ 0x03 ] = KEY_BLUE,
517
518 [ 0x76 ] = KEY_LEFT,
519 [ 0x7e ] = KEY_RIGHT,
520 [ 0x13 ] = KEY_DOWN,
521 [ 0x1b ] = KEY_UP,
522};
523
524EXPORT_SYMBOL_GPL(ir_codes_avertv_303);
525
526/* ---------------------------------------------------------------------- */
527
528/* DigitalNow DNTV Live! DVB-T Pro Remote */
529IR_KEYTAB_TYPE ir_codes_dntv_live_dvbt_pro[IR_KEYTAB_SIZE] = {
530 [ 0x16 ] = KEY_POWER,
531 [ 0x5b ] = KEY_HOME,
532
533 [ 0x55 ] = KEY_TV, /* live tv */
534 [ 0x58 ] = KEY_TUNER, /* digital Radio */
535 [ 0x5a ] = KEY_RADIO, /* FM radio */
536 [ 0x59 ] = KEY_DVD, /* dvd menu */
537 [ 0x03 ] = KEY_1,
538 [ 0x01 ] = KEY_2,
539 [ 0x06 ] = KEY_3,
540 [ 0x09 ] = KEY_4,
541 [ 0x1d ] = KEY_5,
542 [ 0x1f ] = KEY_6,
543 [ 0x0d ] = KEY_7,
544 [ 0x19 ] = KEY_8,
545 [ 0x1b ] = KEY_9,
546 [ 0x0c ] = KEY_CANCEL,
547 [ 0x15 ] = KEY_0,
548 [ 0x4a ] = KEY_CLEAR,
549 [ 0x13 ] = KEY_BACK,
550 [ 0x00 ] = KEY_TAB,
551 [ 0x4b ] = KEY_UP,
552 [ 0x4e ] = KEY_LEFT,
553 [ 0x4f ] = KEY_OK,
554 [ 0x52 ] = KEY_RIGHT,
555 [ 0x51 ] = KEY_DOWN,
556 [ 0x1e ] = KEY_VOLUMEUP,
557 [ 0x0a ] = KEY_VOLUMEDOWN,
558 [ 0x02 ] = KEY_CHANNELDOWN,
559 [ 0x05 ] = KEY_CHANNELUP,
560 [ 0x11 ] = KEY_RECORD,
561 [ 0x14 ] = KEY_PLAY,
562 [ 0x4c ] = KEY_PAUSE,
563 [ 0x1a ] = KEY_STOP,
564 [ 0x40 ] = KEY_REWIND,
565 [ 0x12 ] = KEY_FASTFORWARD,
566 [ 0x41 ] = KEY_PREVIOUSSONG, /* replay |< */
567 [ 0x42 ] = KEY_NEXTSONG, /* skip >| */
568 [ 0x54 ] = KEY_CAMERA, /* capture */
569 [ 0x50 ] = KEY_LANGUAGE, /* sap */
570 [ 0x47 ] = KEY_TV2, /* pip */
571 [ 0x4d ] = KEY_SCREEN,
572 [ 0x43 ] = KEY_SUBTITLE,
573 [ 0x10 ] = KEY_MUTE,
574 [ 0x49 ] = KEY_AUDIO, /* l/r */
575 [ 0x07 ] = KEY_SLEEP,
576 [ 0x08 ] = KEY_VIDEO, /* a/v */
577 [ 0x0e ] = KEY_PREVIOUS, /* recall */
578 [ 0x45 ] = KEY_ZOOM, /* zoom + */
579 [ 0x46 ] = KEY_ANGLE, /* zoom - */
580 [ 0x56 ] = KEY_RED,
581 [ 0x57 ] = KEY_GREEN,
582 [ 0x5c ] = KEY_YELLOW,
583 [ 0x5d ] = KEY_BLUE,
584};
585
586EXPORT_SYMBOL_GPL(ir_codes_dntv_live_dvbt_pro);
587
588IR_KEYTAB_TYPE ir_codes_em_terratec[IR_KEYTAB_SIZE] = {
589 [ 0x01 ] = KEY_CHANNEL,
590 [ 0x02 ] = KEY_SELECT,
591 [ 0x03 ] = KEY_MUTE,
592 [ 0x04 ] = KEY_POWER,
593 [ 0x05 ] = KEY_1,
594 [ 0x06 ] = KEY_2,
595 [ 0x07 ] = KEY_3,
596 [ 0x08 ] = KEY_CHANNELUP,
597 [ 0x09 ] = KEY_4,
598 [ 0x0a ] = KEY_5,
599 [ 0x0b ] = KEY_6,
600 [ 0x0c ] = KEY_CHANNELDOWN,
601 [ 0x0d ] = KEY_7,
602 [ 0x0e ] = KEY_8,
603 [ 0x0f ] = KEY_9,
604 [ 0x10 ] = KEY_VOLUMEUP,
605 [ 0x11 ] = KEY_0,
606 [ 0x12 ] = KEY_MENU,
607 [ 0x13 ] = KEY_PRINT,
608 [ 0x14 ] = KEY_VOLUMEDOWN,
609 [ 0x16 ] = KEY_PAUSE,
610 [ 0x18 ] = KEY_RECORD,
611 [ 0x19 ] = KEY_REWIND,
612 [ 0x1a ] = KEY_PLAY,
613 [ 0x1b ] = KEY_FORWARD,
614 [ 0x1c ] = KEY_BACKSPACE,
615 [ 0x1e ] = KEY_STOP,
616 [ 0x40 ] = KEY_ZOOM,
617};
618
619EXPORT_SYMBOL_GPL(ir_codes_em_terratec);
620
621IR_KEYTAB_TYPE ir_codes_em_pinnacle_usb[IR_KEYTAB_SIZE] = {
622 [ 0x3a ] = KEY_0,
623 [ 0x31 ] = KEY_1,
624 [ 0x32 ] = KEY_2,
625 [ 0x33 ] = KEY_3,
626 [ 0x34 ] = KEY_4,
627 [ 0x35 ] = KEY_5,
628 [ 0x36 ] = KEY_6,
629 [ 0x37 ] = KEY_7,
630 [ 0x38 ] = KEY_8,
631 [ 0x39 ] = KEY_9,
632
633 [ 0x2f ] = KEY_POWER,
634
635 [ 0x2e ] = KEY_P,
636 [ 0x1f ] = KEY_L,
637 [ 0x2b ] = KEY_I,
638
639 [ 0x2d ] = KEY_ZOOM,
640 [ 0x1e ] = KEY_ZOOM,
641 [ 0x1b ] = KEY_VOLUMEUP,
642 [ 0x0f ] = KEY_VOLUMEDOWN,
643 [ 0x17 ] = KEY_CHANNELUP,
644 [ 0x1c ] = KEY_CHANNELDOWN,
645 [ 0x25 ] = KEY_INFO,
646
647 [ 0x3c ] = KEY_MUTE,
648
649 [ 0x3d ] = KEY_LEFT,
650 [ 0x3b ] = KEY_RIGHT,
651
652 [ 0x3f ] = KEY_UP,
653 [ 0x3e ] = KEY_DOWN,
654 [ 0x1a ] = KEY_PAUSE,
655
656 [ 0x1d ] = KEY_MENU,
657 [ 0x19 ] = KEY_PLAY,
658 [ 0x16 ] = KEY_REWIND,
659 [ 0x13 ] = KEY_FORWARD,
660 [ 0x15 ] = KEY_PAUSE,
661 [ 0x0e ] = KEY_REWIND,
662 [ 0x0d ] = KEY_PLAY,
663 [ 0x0b ] = KEY_STOP,
664 [ 0x07 ] = KEY_FORWARD,
665 [ 0x27 ] = KEY_RECORD,
666 [ 0x26 ] = KEY_TUNER,
667 [ 0x29 ] = KEY_TEXT,
668 [ 0x2a ] = KEY_MEDIA,
669 [ 0x18 ] = KEY_EPG,
670 [ 0x27 ] = KEY_RECORD,
671};
672
673EXPORT_SYMBOL_GPL(ir_codes_em_pinnacle_usb);
674
675IR_KEYTAB_TYPE ir_codes_flyvideo[IR_KEYTAB_SIZE] = {
676 [ 0x0f ] = KEY_0,
677 [ 0x03 ] = KEY_1,
678 [ 0x04 ] = KEY_2,
679 [ 0x05 ] = KEY_3,
680 [ 0x07 ] = KEY_4,
681 [ 0x08 ] = KEY_5,
682 [ 0x09 ] = KEY_6,
683 [ 0x0b ] = KEY_7,
684 [ 0x0c ] = KEY_8,
685 [ 0x0d ] = KEY_9,
686
687 [ 0x0e ] = KEY_MODE, // Air/Cable
688 [ 0x11 ] = KEY_VIDEO, // Video
689 [ 0x15 ] = KEY_AUDIO, // Audio
690 [ 0x00 ] = KEY_POWER, // Power
691 [ 0x18 ] = KEY_TUNER, // AV Source
692 [ 0x02 ] = KEY_ZOOM, // Fullscreen
693 [ 0x1a ] = KEY_LANGUAGE, // Stereo
694 [ 0x1b ] = KEY_MUTE, // Mute
695 [ 0x14 ] = KEY_VOLUMEUP, // Volume +
696 [ 0x17 ] = KEY_VOLUMEDOWN, // Volume -
697 [ 0x12 ] = KEY_CHANNELUP, // Channel +
698 [ 0x13 ] = KEY_CHANNELDOWN, // Channel -
699 [ 0x06 ] = KEY_AGAIN, // Recall
700 [ 0x10 ] = KEY_ENTER, // Enter
701};
702
703EXPORT_SYMBOL_GPL(ir_codes_flyvideo);
704
705IR_KEYTAB_TYPE ir_codes_flydvb[IR_KEYTAB_SIZE] = {
706 [ 0x01 ] = KEY_ZOOM, // Full Screen
707 [ 0x00 ] = KEY_POWER, // Power
708
709 [ 0x03 ] = KEY_1,
710 [ 0x04 ] = KEY_2,
711 [ 0x05 ] = KEY_3,
712 [ 0x07 ] = KEY_4,
713 [ 0x08 ] = KEY_5,
714 [ 0x09 ] = KEY_6,
715 [ 0x0b ] = KEY_7,
716 [ 0x0c ] = KEY_8,
717 [ 0x0d ] = KEY_9,
718 [ 0x06 ] = KEY_AGAIN, // Recall
719 [ 0x0f ] = KEY_0,
720 [ 0x10 ] = KEY_MUTE, // Mute
721 [ 0x02 ] = KEY_RADIO, // TV/Radio
722 [ 0x1b ] = KEY_LANGUAGE, // SAP (Second Audio Program)
723
724 [ 0x14 ] = KEY_VOLUMEUP, // VOL+
725 [ 0x17 ] = KEY_VOLUMEDOWN, // VOL-
726 [ 0x12 ] = KEY_CHANNELUP, // CH+
727 [ 0x13 ] = KEY_CHANNELDOWN, // CH-
728 [ 0x1d ] = KEY_ENTER, // Enter
729
730 [ 0x1a ] = KEY_MODE, // PIP
731 [ 0x18 ] = KEY_TUNER, // Source
732
733 [ 0x1e ] = KEY_RECORD, // Record/Pause
734 [ 0x15 ] = KEY_ANGLE, // Swap (no label on key)
735 [ 0x1c ] = KEY_PAUSE, // Timeshift/Pause
736 [ 0x19 ] = KEY_BACK, // Rewind <<
737 [ 0x0a ] = KEY_PLAYPAUSE, // Play/Pause
738 [ 0x1f ] = KEY_FORWARD, // Forward >>
739 [ 0x16 ] = KEY_PREVIOUS, // Back |<<
740 [ 0x11 ] = KEY_STOP, // Stop
741 [ 0x0e ] = KEY_NEXT, // End >>|
742};
743
744EXPORT_SYMBOL_GPL(ir_codes_flydvb);
745
746IR_KEYTAB_TYPE ir_codes_cinergy[IR_KEYTAB_SIZE] = {
747 [ 0x00 ] = KEY_0,
748 [ 0x01 ] = KEY_1,
749 [ 0x02 ] = KEY_2,
750 [ 0x03 ] = KEY_3,
751 [ 0x04 ] = KEY_4,
752 [ 0x05 ] = KEY_5,
753 [ 0x06 ] = KEY_6,
754 [ 0x07 ] = KEY_7,
755 [ 0x08 ] = KEY_8,
756 [ 0x09 ] = KEY_9,
757
758 [ 0x0a ] = KEY_POWER,
759 [ 0x0b ] = KEY_PROG1, // app
760 [ 0x0c ] = KEY_ZOOM, // zoom/fullscreen
761 [ 0x0d ] = KEY_CHANNELUP, // channel
762 [ 0x0e ] = KEY_CHANNELDOWN, // channel-
763 [ 0x0f ] = KEY_VOLUMEUP,
764 [ 0x10 ] = KEY_VOLUMEDOWN,
765 [ 0x11 ] = KEY_TUNER, // AV
766 [ 0x12 ] = KEY_NUMLOCK, // -/--
767 [ 0x13 ] = KEY_AUDIO, // audio
768 [ 0x14 ] = KEY_MUTE,
769 [ 0x15 ] = KEY_UP,
770 [ 0x16 ] = KEY_DOWN,
771 [ 0x17 ] = KEY_LEFT,
772 [ 0x18 ] = KEY_RIGHT,
773 [ 0x19 ] = BTN_LEFT,
774 [ 0x1a ] = BTN_RIGHT,
775 [ 0x1b ] = KEY_WWW, // text
776 [ 0x1c ] = KEY_REWIND,
777 [ 0x1d ] = KEY_FORWARD,
778 [ 0x1e ] = KEY_RECORD,
779 [ 0x1f ] = KEY_PLAY,
780 [ 0x20 ] = KEY_PREVIOUSSONG,
781 [ 0x21 ] = KEY_NEXTSONG,
782 [ 0x22 ] = KEY_PAUSE,
783 [ 0x23 ] = KEY_STOP,
784};
785
786EXPORT_SYMBOL_GPL(ir_codes_cinergy);
787
788/* Alfons Geser <a.geser@cox.net>
789 * updates from Job D. R. Borges <jobdrb@ig.com.br> */
790IR_KEYTAB_TYPE ir_codes_eztv[IR_KEYTAB_SIZE] = {
791 [ 0x12 ] = KEY_POWER,
792 [ 0x01 ] = KEY_TV, // DVR
793 [ 0x15 ] = KEY_DVD, // DVD
794 [ 0x17 ] = KEY_AUDIO, // music
795 // DVR mode / DVD mode / music mode
796
797 [ 0x1b ] = KEY_MUTE, // mute
798 [ 0x02 ] = KEY_LANGUAGE, // MTS/SAP / audio / autoseek
799 [ 0x1e ] = KEY_SUBTITLE, // closed captioning / subtitle / seek
800 [ 0x16 ] = KEY_ZOOM, // full screen
801 [ 0x1c ] = KEY_VIDEO, // video source / eject / delall
802 [ 0x1d ] = KEY_RESTART, // playback / angle / del
803 [ 0x2f ] = KEY_SEARCH, // scan / menu / playlist
804 [ 0x30 ] = KEY_CHANNEL, // CH surfing / bookmark / memo
805
806 [ 0x31 ] = KEY_HELP, // help
807 [ 0x32 ] = KEY_MODE, // num/memo
808 [ 0x33 ] = KEY_ESC, // cancel
809
810 [ 0x0c ] = KEY_UP, // up
811 [ 0x10 ] = KEY_DOWN, // down
812 [ 0x08 ] = KEY_LEFT, // left
813 [ 0x04 ] = KEY_RIGHT, // right
814 [ 0x03 ] = KEY_SELECT, // select
815
816 [ 0x1f ] = KEY_REWIND, // rewind
817 [ 0x20 ] = KEY_PLAYPAUSE, // play/pause
818 [ 0x29 ] = KEY_FORWARD, // forward
819 [ 0x14 ] = KEY_AGAIN, // repeat
820 [ 0x2b ] = KEY_RECORD, // recording
821 [ 0x2c ] = KEY_STOP, // stop
822 [ 0x2d ] = KEY_PLAY, // play
823 [ 0x2e ] = KEY_SHUFFLE, // snapshot / shuffle
824
825 [ 0x00 ] = KEY_0,
826 [ 0x05 ] = KEY_1,
827 [ 0x06 ] = KEY_2,
828 [ 0x07 ] = KEY_3,
829 [ 0x09 ] = KEY_4,
830 [ 0x0a ] = KEY_5,
831 [ 0x0b ] = KEY_6,
832 [ 0x0d ] = KEY_7,
833 [ 0x0e ] = KEY_8,
834 [ 0x0f ] = KEY_9,
835
836 [ 0x2a ] = KEY_VOLUMEUP,
837 [ 0x11 ] = KEY_VOLUMEDOWN,
838 [ 0x18 ] = KEY_CHANNELUP, // CH.tracking up
839 [ 0x19 ] = KEY_CHANNELDOWN, // CH.tracking down
840
841 [ 0x13 ] = KEY_ENTER, // enter
842 [ 0x21 ] = KEY_DOT, // . (decimal dot)
843};
844
845EXPORT_SYMBOL_GPL(ir_codes_eztv);
846
847/* Alex Hermann <gaaf@gmx.net> */
848IR_KEYTAB_TYPE ir_codes_avermedia[IR_KEYTAB_SIZE] = {
849 [ 0x28 ] = KEY_1,
850 [ 0x18 ] = KEY_2,
851 [ 0x38 ] = KEY_3,
852 [ 0x24 ] = KEY_4,
853 [ 0x14 ] = KEY_5,
854 [ 0x34 ] = KEY_6,
855 [ 0x2c ] = KEY_7,
856 [ 0x1c ] = KEY_8,
857 [ 0x3c ] = KEY_9,
858 [ 0x22 ] = KEY_0,
859
860 [ 0x20 ] = KEY_TV, /* TV/FM */
861 [ 0x10 ] = KEY_CD, /* CD */
862 [ 0x30 ] = KEY_TEXT, /* TELETEXT */
863 [ 0x00 ] = KEY_POWER, /* POWER */
864
865 [ 0x08 ] = KEY_VIDEO, /* VIDEO */
866 [ 0x04 ] = KEY_AUDIO, /* AUDIO */
867 [ 0x0c ] = KEY_ZOOM, /* FULL SCREEN */
868
869 [ 0x12 ] = KEY_SUBTITLE, /* DISPLAY */
870 [ 0x32 ] = KEY_REWIND, /* LOOP */
871 [ 0x02 ] = KEY_PRINT, /* PREVIEW */
872
873 [ 0x2a ] = KEY_SEARCH, /* AUTOSCAN */
874 [ 0x1a ] = KEY_SLEEP, /* FREEZE */
875 [ 0x3a ] = KEY_SHUFFLE, /* SNAPSHOT */
876 [ 0x0a ] = KEY_MUTE, /* MUTE */
877
878 [ 0x26 ] = KEY_RECORD, /* RECORD */
879 [ 0x16 ] = KEY_PAUSE, /* PAUSE */
880 [ 0x36 ] = KEY_STOP, /* STOP */
881 [ 0x06 ] = KEY_PLAY, /* PLAY */
882
883 [ 0x2e ] = KEY_RED, /* RED */
884 [ 0x21 ] = KEY_GREEN, /* GREEN */
885 [ 0x0e ] = KEY_YELLOW, /* YELLOW */
886 [ 0x01 ] = KEY_BLUE, /* BLUE */
887
888 [ 0x1e ] = KEY_VOLUMEDOWN, /* VOLUME- */
889 [ 0x3e ] = KEY_VOLUMEUP, /* VOLUME+ */
890 [ 0x11 ] = KEY_CHANNELDOWN, /* CHANNEL/PAGE- */
891 [ 0x31 ] = KEY_CHANNELUP /* CHANNEL/PAGE+ */
892};
893
894EXPORT_SYMBOL_GPL(ir_codes_avermedia);
895
896IR_KEYTAB_TYPE ir_codes_videomate_tv_pvr[IR_KEYTAB_SIZE] = {
897 [ 0x14 ] = KEY_MUTE,
898 [ 0x24 ] = KEY_ZOOM,
899
900 [ 0x01 ] = KEY_DVD,
901 [ 0x23 ] = KEY_RADIO,
902 [ 0x00 ] = KEY_TV,
903
904 [ 0x0a ] = KEY_REWIND,
905 [ 0x08 ] = KEY_PLAYPAUSE,
906 [ 0x0f ] = KEY_FORWARD,
907
908 [ 0x02 ] = KEY_PREVIOUS,
909 [ 0x07 ] = KEY_STOP,
910 [ 0x06 ] = KEY_NEXT,
911
912 [ 0x0c ] = KEY_UP,
913 [ 0x0e ] = KEY_DOWN,
914 [ 0x0b ] = KEY_LEFT,
915 [ 0x0d ] = KEY_RIGHT,
916 [ 0x11 ] = KEY_OK,
917
918 [ 0x03 ] = KEY_MENU,
919 [ 0x09 ] = KEY_SETUP,
920 [ 0x05 ] = KEY_VIDEO,
921 [ 0x22 ] = KEY_CHANNEL,
922
923 [ 0x12 ] = KEY_VOLUMEUP,
924 [ 0x15 ] = KEY_VOLUMEDOWN,
925 [ 0x10 ] = KEY_CHANNELUP,
926 [ 0x13 ] = KEY_CHANNELDOWN,
927
928 [ 0x04 ] = KEY_RECORD,
929
930 [ 0x16 ] = KEY_1,
931 [ 0x17 ] = KEY_2,
932 [ 0x18 ] = KEY_3,
933 [ 0x19 ] = KEY_4,
934 [ 0x1a ] = KEY_5,
935 [ 0x1b ] = KEY_6,
936 [ 0x1c ] = KEY_7,
937 [ 0x1d ] = KEY_8,
938 [ 0x1e ] = KEY_9,
939 [ 0x1f ] = KEY_0,
940
941 [ 0x20 ] = KEY_LANGUAGE,
942 [ 0x21 ] = KEY_SLEEP,
943};
944
945EXPORT_SYMBOL_GPL(ir_codes_videomate_tv_pvr);
946
947/* Michael Tokarev <mjt@tls.msk.ru>
948 http://www.corpit.ru/mjt/beholdTV/remote_control.jpg
 949 This keytable is used by MANLI MTV00[ 0x0c ] and BeholdTV 40[13] at
 950 least, and probably by other cards too.
 951 The "ascii-art picture" below (in the comments; the first row
 952 is the keycode in hex, and the subsequent row(s) show
 953 the button labels, with several variants when appropriate)
 954 helps to decide which keycodes to assign to the buttons.
955 */
956IR_KEYTAB_TYPE ir_codes_manli[IR_KEYTAB_SIZE] = {
957
958 /* 0x1c 0x12 *
959 * FUNCTION POWER *
960 * FM (|) *
961 * */
962 [ 0x1c ] = KEY_RADIO, /*XXX*/
963 [ 0x12 ] = KEY_POWER,
964
965 /* 0x01 0x02 0x03 *
966 * 1 2 3 *
967 * *
968 * 0x04 0x05 0x06 *
969 * 4 5 6 *
970 * *
971 * 0x07 0x08 0x09 *
972 * 7 8 9 *
973 * */
974 [ 0x01 ] = KEY_1,
975 [ 0x02 ] = KEY_2,
976 [ 0x03 ] = KEY_3,
977 [ 0x04 ] = KEY_4,
978 [ 0x05 ] = KEY_5,
979 [ 0x06 ] = KEY_6,
980 [ 0x07 ] = KEY_7,
981 [ 0x08 ] = KEY_8,
982 [ 0x09 ] = KEY_9,
983
984 /* 0x0a 0x00 0x17 *
985 * RECALL 0 +100 *
986 * PLUS *
987 * */
988 [ 0x0a ] = KEY_AGAIN, /*XXX KEY_REWIND? */
989 [ 0x00 ] = KEY_0,
990 [ 0x17 ] = KEY_DIGITS, /*XXX*/
991
992 /* 0x14 0x10 *
993 * MENU INFO *
994 * OSD */
995 [ 0x14 ] = KEY_MENU,
996 [ 0x10 ] = KEY_INFO,
997
998 /* 0x0b *
999 * Up *
1000 * *
1001 * 0x18 0x16 0x0c *
1002 * Left Ok Right *
1003 * *
1004 * 0x015 *
1005 * Down *
1006 * */
1007 [ 0x0b ] = KEY_UP, /*XXX KEY_SCROLLUP? */
1008 [ 0x18 ] = KEY_LEFT, /*XXX KEY_BACK? */
1009 [ 0x16 ] = KEY_OK, /*XXX KEY_SELECT? KEY_ENTER? */
1010 [ 0x0c ] = KEY_RIGHT, /*XXX KEY_FORWARD? */
1011 [ 0x15 ] = KEY_DOWN, /*XXX KEY_SCROLLDOWN? */
1012
1013 /* 0x11 0x0d *
1014 * TV/AV MODE *
1015 * SOURCE STEREO *
1016 * */
1017 [ 0x11 ] = KEY_TV, /*XXX*/
1018 [ 0x0d ] = KEY_MODE, /*XXX there's no KEY_STEREO */
1019
1020 /* 0x0f 0x1b 0x1a *
1021 * AUDIO Vol+ Chan+ *
1022 * TIMESHIFT??? *
1023 * *
1024 * 0x0e 0x1f 0x1e *
1025 * SLEEP Vol- Chan- *
1026 * */
1027 [ 0x0f ] = KEY_AUDIO,
1028 [ 0x1b ] = KEY_VOLUMEUP,
1029 [ 0x1a ] = KEY_CHANNELUP,
1030 [ 0x0e ] = KEY_SLEEP, /*XXX maybe KEY_PAUSE */
1031 [ 0x1f ] = KEY_VOLUMEDOWN,
1032 [ 0x1e ] = KEY_CHANNELDOWN,
1033
1034 /* 0x13 0x19 *
1035 * MUTE SNAPSHOT*
1036 * */
1037 [ 0x13 ] = KEY_MUTE,
1038 [ 0x19 ] = KEY_RECORD, /*XXX*/
1039
1040 // 0x1d unused ?
1041};
1042
1043EXPORT_SYMBOL_GPL(ir_codes_manli);
1044
1045/* Mike Baikov <mike@baikov.com> */
1046IR_KEYTAB_TYPE ir_codes_gotview7135[IR_KEYTAB_SIZE] = {
1047
1048 [ 0x21 ] = KEY_POWER,
1049 [ 0x69 ] = KEY_TV,
1050 [ 0x33 ] = KEY_0,
1051 [ 0x51 ] = KEY_1,
1052 [ 0x31 ] = KEY_2,
1053 [ 0x71 ] = KEY_3,
1054 [ 0x3b ] = KEY_4,
1055 [ 0x58 ] = KEY_5,
1056 [ 0x41 ] = KEY_6,
1057 [ 0x48 ] = KEY_7,
1058 [ 0x30 ] = KEY_8,
1059 [ 0x53 ] = KEY_9,
1060 [ 0x73 ] = KEY_AGAIN, /* LOOP */
1061 [ 0x0a ] = KEY_AUDIO,
1062 [ 0x61 ] = KEY_PRINT, /* PREVIEW */
1063 [ 0x7a ] = KEY_VIDEO,
1064 [ 0x20 ] = KEY_CHANNELUP,
1065 [ 0x40 ] = KEY_CHANNELDOWN,
1066 [ 0x18 ] = KEY_VOLUMEDOWN,
1067 [ 0x50 ] = KEY_VOLUMEUP,
1068 [ 0x10 ] = KEY_MUTE,
1069 [ 0x4a ] = KEY_SEARCH,
1070 [ 0x7b ] = KEY_SHUFFLE, /* SNAPSHOT */
1071 [ 0x22 ] = KEY_RECORD,
1072 [ 0x62 ] = KEY_STOP,
1073 [ 0x78 ] = KEY_PLAY,
1074 [ 0x39 ] = KEY_REWIND,
1075 [ 0x59 ] = KEY_PAUSE,
1076 [ 0x19 ] = KEY_FORWARD,
1077 [ 0x09 ] = KEY_ZOOM,
1078
1079 [ 0x52 ] = KEY_F21, /* LIVE TIMESHIFT */
1080 [ 0x1a ] = KEY_F22, /* MIN TIMESHIFT */
1081 [ 0x3a ] = KEY_F23, /* TIMESHIFT */
1082 [ 0x70 ] = KEY_F24, /* NORMAL TIMESHIFT */
1083};
1084
1085EXPORT_SYMBOL_GPL(ir_codes_gotview7135);
1086
1087IR_KEYTAB_TYPE ir_codes_purpletv[IR_KEYTAB_SIZE] = {
1088 [ 0x03 ] = KEY_POWER,
1089 [ 0x6f ] = KEY_MUTE,
1090 [ 0x10 ] = KEY_BACKSPACE, /* Recall */
1091
1092 [ 0x11 ] = KEY_0,
1093 [ 0x04 ] = KEY_1,
1094 [ 0x05 ] = KEY_2,
1095 [ 0x06 ] = KEY_3,
1096 [ 0x08 ] = KEY_4,
1097 [ 0x09 ] = KEY_5,
1098 [ 0x0a ] = KEY_6,
1099 [ 0x0c ] = KEY_7,
1100 [ 0x0d ] = KEY_8,
1101 [ 0x0e ] = KEY_9,
1102 [ 0x12 ] = KEY_DOT, /* 100+ */
1103
1104 [ 0x07 ] = KEY_VOLUMEUP,
1105 [ 0x0b ] = KEY_VOLUMEDOWN,
1106 [ 0x1a ] = KEY_KPPLUS,
1107 [ 0x18 ] = KEY_KPMINUS,
1108 [ 0x15 ] = KEY_UP,
1109 [ 0x1d ] = KEY_DOWN,
1110 [ 0x0f ] = KEY_CHANNELUP,
1111 [ 0x13 ] = KEY_CHANNELDOWN,
1112 [ 0x48 ] = KEY_ZOOM,
1113
1114 [ 0x1b ] = KEY_VIDEO, /* Video source */
1115 [ 0x49 ] = KEY_LANGUAGE, /* MTS Select */
1116 [ 0x19 ] = KEY_SEARCH, /* Auto Scan */
1117
1118 [ 0x4b ] = KEY_RECORD,
1119 [ 0x46 ] = KEY_PLAY,
1120 [ 0x45 ] = KEY_PAUSE, /* Pause */
1121 [ 0x44 ] = KEY_STOP,
1122 [ 0x40 ] = KEY_FORWARD, /* Forward ? */
1123 [ 0x42 ] = KEY_REWIND, /* Backward ? */
1124
1125};
1126
1127EXPORT_SYMBOL_GPL(ir_codes_purpletv);
1128
1129/* Mapping for the 28-key remote control as seen at
1130 http://www.sednacomputer.com/photo/cardbus-tv.jpg
1131 Pavel Mihaylov <bin@bash.info> */
1132IR_KEYTAB_TYPE ir_codes_pctv_sedna[IR_KEYTAB_SIZE] = {
1133 [ 0x00 ] = KEY_0,
1134 [ 0x01 ] = KEY_1,
1135 [ 0x02 ] = KEY_2,
1136 [ 0x03 ] = KEY_3,
1137 [ 0x04 ] = KEY_4,
1138 [ 0x05 ] = KEY_5,
1139 [ 0x06 ] = KEY_6,
1140 [ 0x07 ] = KEY_7,
1141 [ 0x08 ] = KEY_8,
1142 [ 0x09 ] = KEY_9,
1143
1144 [ 0x0a ] = KEY_AGAIN, /* Recall */
1145 [ 0x0b ] = KEY_CHANNELUP,
1146 [ 0x0c ] = KEY_VOLUMEUP,
1147 [ 0x0d ] = KEY_MODE, /* Stereo */
1148 [ 0x0e ] = KEY_STOP,
1149 [ 0x0f ] = KEY_PREVIOUSSONG,
1150 [ 0x10 ] = KEY_ZOOM,
1151 [ 0x11 ] = KEY_TUNER, /* Source */
1152 [ 0x12 ] = KEY_POWER,
1153 [ 0x13 ] = KEY_MUTE,
1154 [ 0x15 ] = KEY_CHANNELDOWN,
1155 [ 0x18 ] = KEY_VOLUMEDOWN,
1156 [ 0x19 ] = KEY_SHUFFLE, /* Snapshot */
1157 [ 0x1a ] = KEY_NEXTSONG,
1158 [ 0x1b ] = KEY_TEXT, /* Time Shift */
1159 [ 0x1c ] = KEY_RADIO, /* FM Radio */
1160 [ 0x1d ] = KEY_RECORD,
1161 [ 0x1e ] = KEY_PAUSE,
1162};
1163
1164EXPORT_SYMBOL_GPL(ir_codes_pctv_sedna);
1165
1166/* Mark Phalan <phalanm@o2.ie> */
1167IR_KEYTAB_TYPE ir_codes_pv951[IR_KEYTAB_SIZE] = {
1168 [ 0x00 ] = KEY_0,
1169 [ 0x01 ] = KEY_1,
1170 [ 0x02 ] = KEY_2,
1171 [ 0x03 ] = KEY_3,
1172 [ 0x04 ] = KEY_4,
1173 [ 0x05 ] = KEY_5,
1174 [ 0x06 ] = KEY_6,
1175 [ 0x07 ] = KEY_7,
1176 [ 0x08 ] = KEY_8,
1177 [ 0x09 ] = KEY_9,
1178
1179 [ 0x12 ] = KEY_POWER,
1180 [ 0x10 ] = KEY_MUTE,
1181 [ 0x1f ] = KEY_VOLUMEDOWN,
1182 [ 0x1b ] = KEY_VOLUMEUP,
1183 [ 0x1a ] = KEY_CHANNELUP,
1184 [ 0x1e ] = KEY_CHANNELDOWN,
1185 [ 0x0e ] = KEY_PAGEUP,
1186 [ 0x1d ] = KEY_PAGEDOWN,
1187 [ 0x13 ] = KEY_SOUND,
1188
1189 [ 0x18 ] = KEY_KPPLUSMINUS, /* CH +/- */
1190 [ 0x16 ] = KEY_SUBTITLE, /* CC */
1191 [ 0x0d ] = KEY_TEXT, /* TTX */
1192 [ 0x0b ] = KEY_TV, /* AIR/CBL */
1193 [ 0x11 ] = KEY_PC, /* PC/TV */
1194 [ 0x17 ] = KEY_OK, /* CH RTN */
1195 [ 0x19 ] = KEY_MODE, /* FUNC */
1196 [ 0x0c ] = KEY_SEARCH, /* AUTOSCAN */
1197
1198 /* Not sure what to do with these ones! */
1199 [ 0x0f ] = KEY_SELECT, /* SOURCE */
1200 [ 0x0a ] = KEY_KPPLUS, /* +100 */
1201 [ 0x14 ] = KEY_EQUAL, /* SYNC */
1202 [ 0x1c ] = KEY_MEDIA, /* PC/TV */
1203};
1204
1205EXPORT_SYMBOL_GPL(ir_codes_pv951);
1206
1207/* generic RC5 keytable */
1208/* see http://users.pandora.be/nenya/electronics/rc5/codes00.htm */
1209/* used by old (black) Hauppauge remotes */
1210IR_KEYTAB_TYPE ir_codes_rc5_tv[IR_KEYTAB_SIZE] = {
1211 /* Keys 0 to 9 */
1212 [ 0x00 ] = KEY_0,
1213 [ 0x01 ] = KEY_1,
1214 [ 0x02 ] = KEY_2,
1215 [ 0x03 ] = KEY_3,
1216 [ 0x04 ] = KEY_4,
1217 [ 0x05 ] = KEY_5,
1218 [ 0x06 ] = KEY_6,
1219 [ 0x07 ] = KEY_7,
1220 [ 0x08 ] = KEY_8,
1221 [ 0x09 ] = KEY_9,
1222
1223 [ 0x0b ] = KEY_CHANNEL, /* channel / program (japan: 11) */
1224 [ 0x0c ] = KEY_POWER, /* standby */
1225 [ 0x0d ] = KEY_MUTE, /* mute / demute */
1226 [ 0x0f ] = KEY_TV, /* display */
1227 [ 0x10 ] = KEY_VOLUMEUP,
1228 [ 0x11 ] = KEY_VOLUMEDOWN,
1229 [ 0x12 ] = KEY_BRIGHTNESSUP,
1230 [ 0x13 ] = KEY_BRIGHTNESSDOWN,
1231 [ 0x1e ] = KEY_SEARCH, /* search + */
1232 [ 0x20 ] = KEY_CHANNELUP, /* channel / program + */
1233 [ 0x21 ] = KEY_CHANNELDOWN, /* channel / program - */
1234 [ 0x22 ] = KEY_CHANNEL, /* alt / channel */
1235 [ 0x23 ] = KEY_LANGUAGE, /* 1st / 2nd language */
1236 [ 0x26 ] = KEY_SLEEP, /* sleeptimer */
1237 [ 0x2e ] = KEY_MENU, /* 2nd controls (USA: menu) */
1238 [ 0x30 ] = KEY_PAUSE,
1239 [ 0x32 ] = KEY_REWIND,
1240 [ 0x33 ] = KEY_GOTO,
1241 [ 0x35 ] = KEY_PLAY,
1242 [ 0x36 ] = KEY_STOP,
1243 [ 0x37 ] = KEY_RECORD, /* recording */
1244 [ 0x3c ] = KEY_TEXT, /* teletext submode (Japan: 12) */
1245 [ 0x3d ] = KEY_SUSPEND, /* system standby */
1246
1247};
1248
1249EXPORT_SYMBOL_GPL(ir_codes_rc5_tv);
1250
1251/* Table for Leadtek Winfast Remote Controls - used by both bttv and cx88 */
1252IR_KEYTAB_TYPE ir_codes_winfast[IR_KEYTAB_SIZE] = {
1253 /* Keys 0 to 9 */
1254 [ 0x12 ] = KEY_0,
1255 [ 0x05 ] = KEY_1,
1256 [ 0x06 ] = KEY_2,
1257 [ 0x07 ] = KEY_3,
1258 [ 0x09 ] = KEY_4,
1259 [ 0x0a ] = KEY_5,
1260 [ 0x0b ] = KEY_6,
1261 [ 0x0d ] = KEY_7,
1262 [ 0x0e ] = KEY_8,
1263 [ 0x0f ] = KEY_9,
1264
1265 [ 0x00 ] = KEY_POWER,
1266 [ 0x02 ] = KEY_TUNER, /* TV/FM */
1267 [ 0x1e ] = KEY_VIDEO,
1268 [ 0x04 ] = KEY_VOLUMEUP,
1269 [ 0x08 ] = KEY_VOLUMEDOWN,
1270 [ 0x0c ] = KEY_CHANNELUP,
1271 [ 0x10 ] = KEY_CHANNELDOWN,
1272 [ 0x03 ] = KEY_ZOOM, /* fullscreen */
1273 [ 0x1f ] = KEY_SUBTITLE, /* closed caption/teletext */
1274 [ 0x20 ] = KEY_SLEEP,
1275 [ 0x14 ] = KEY_MUTE,
1276 [ 0x2b ] = KEY_RED,
1277 [ 0x2c ] = KEY_GREEN,
1278 [ 0x2d ] = KEY_YELLOW,
1279 [ 0x2e ] = KEY_BLUE,
1280 [ 0x18 ] = KEY_KPPLUS, /* fine tune + */
1281 [ 0x19 ] = KEY_KPMINUS, /* fine tune - */
1282 [ 0x21 ] = KEY_DOT,
1283 [ 0x13 ] = KEY_ENTER,
1284 [ 0x22 ] = KEY_BACK,
1285 [ 0x23 ] = KEY_PLAYPAUSE,
1286 [ 0x24 ] = KEY_NEXT,
1287 [ 0x26 ] = KEY_STOP,
1288 [ 0x27 ] = KEY_RECORD
1289};
1290
1291EXPORT_SYMBOL_GPL(ir_codes_winfast);
1292
1293IR_KEYTAB_TYPE ir_codes_pinnacle[IR_KEYTAB_SIZE] = {
1294 [ 0x59 ] = KEY_MUTE,
1295 [ 0x4a ] = KEY_POWER,
1296
1297 [ 0x18 ] = KEY_TEXT,
1298 [ 0x26 ] = KEY_TV,
1299 [ 0x3d ] = KEY_PRINT,
1300
1301 [ 0x48 ] = KEY_RED,
1302 [ 0x04 ] = KEY_GREEN,
1303 [ 0x11 ] = KEY_YELLOW,
1304 [ 0x00 ] = KEY_BLUE,
1305
1306 [ 0x2d ] = KEY_VOLUMEUP,
1307 [ 0x1e ] = KEY_VOLUMEDOWN,
1308
1309 [ 0x49 ] = KEY_MENU,
1310
1311 [ 0x16 ] = KEY_CHANNELUP,
1312 [ 0x17 ] = KEY_CHANNELDOWN,
1313
1314 [ 0x20 ] = KEY_UP,
1315 [ 0x21 ] = KEY_DOWN,
1316 [ 0x22 ] = KEY_LEFT,
1317 [ 0x23 ] = KEY_RIGHT,
1318 [ 0x0d ] = KEY_SELECT,
1319
1320
1321
1322 [ 0x08 ] = KEY_BACK,
1323 [ 0x07 ] = KEY_REFRESH,
1324
1325 [ 0x2f ] = KEY_ZOOM,
1326 [ 0x29 ] = KEY_RECORD,
1327
1328 [ 0x4b ] = KEY_PAUSE,
1329 [ 0x4d ] = KEY_REWIND,
1330 [ 0x2e ] = KEY_PLAY,
1331 [ 0x4e ] = KEY_FORWARD,
1332 [ 0x53 ] = KEY_PREVIOUS,
1333 [ 0x4c ] = KEY_STOP,
1334 [ 0x54 ] = KEY_NEXT,
1335
1336 [ 0x69 ] = KEY_0,
1337 [ 0x6a ] = KEY_1,
1338 [ 0x6b ] = KEY_2,
1339 [ 0x6c ] = KEY_3,
1340 [ 0x6d ] = KEY_4,
1341 [ 0x6e ] = KEY_5,
1342 [ 0x6f ] = KEY_6,
1343 [ 0x70 ] = KEY_7,
1344 [ 0x71 ] = KEY_8,
1345 [ 0x72 ] = KEY_9,
1346
1347 [ 0x74 ] = KEY_CHANNEL,
1348 [ 0x0a ] = KEY_BACKSPACE,
1349};
1350
1351EXPORT_SYMBOL_GPL(ir_codes_pinnacle);
1352
1353/* Hauppauge: the newer, gray remotes (it seems there are multiple
1354 * slightly different versions), shipped with cx88+ivtv cards.
1355 * Almost RC5 coding, but with some non-standard keys. */
1356IR_KEYTAB_TYPE ir_codes_hauppauge_new[IR_KEYTAB_SIZE] = {
1357 /* Keys 0 to 9 */
1358 [ 0x00 ] = KEY_0,
1359 [ 0x01 ] = KEY_1,
1360 [ 0x02 ] = KEY_2,
1361 [ 0x03 ] = KEY_3,
1362 [ 0x04 ] = KEY_4,
1363 [ 0x05 ] = KEY_5,
1364 [ 0x06 ] = KEY_6,
1365 [ 0x07 ] = KEY_7,
1366 [ 0x08 ] = KEY_8,
1367 [ 0x09 ] = KEY_9,
1368
1369 [ 0x0a ] = KEY_TEXT, /* keypad asterisk as well */
1370 [ 0x0b ] = KEY_RED, /* red button */
1371 [ 0x0c ] = KEY_RADIO,
1372 [ 0x0d ] = KEY_MENU,
1373 [ 0x0e ] = KEY_SUBTITLE, /* also the # key */
1374 [ 0x0f ] = KEY_MUTE,
1375 [ 0x10 ] = KEY_VOLUMEUP,
1376 [ 0x11 ] = KEY_VOLUMEDOWN,
1377 [ 0x12 ] = KEY_PREVIOUS, /* previous channel */
1378 [ 0x14 ] = KEY_UP,
1379 [ 0x15 ] = KEY_DOWN,
1380 [ 0x16 ] = KEY_LEFT,
1381 [ 0x17 ] = KEY_RIGHT,
1382 [ 0x18 ] = KEY_VIDEO, /* Videos */
1383 [ 0x19 ] = KEY_AUDIO, /* Music */
1384 /* 0x1a: Pictures - presume this means
1385 "Multimedia Home Platform" -
1386 no "PICTURES" key in input.h
1387 */
1388 [ 0x1a ] = KEY_MHP,
1389
1390 [ 0x1b ] = KEY_EPG, /* Guide */
1391 [ 0x1c ] = KEY_TV,
1392 [ 0x1e ] = KEY_NEXTSONG, /* skip >| */
1393 [ 0x1f ] = KEY_EXIT, /* back/exit */
1394 [ 0x20 ] = KEY_CHANNELUP, /* channel / program + */
1395 [ 0x21 ] = KEY_CHANNELDOWN, /* channel / program - */
1396 [ 0x22 ] = KEY_CHANNEL, /* source (old black remote) */
1397 [ 0x24 ] = KEY_PREVIOUSSONG, /* replay |< */
1398 [ 0x25 ] = KEY_ENTER, /* OK */
1399 [ 0x26 ] = KEY_SLEEP, /* minimize (old black remote) */
1400 [ 0x29 ] = KEY_BLUE, /* blue key */
1401 [ 0x2e ] = KEY_GREEN, /* green button */
1402 [ 0x30 ] = KEY_PAUSE, /* pause */
1403 [ 0x32 ] = KEY_REWIND, /* backward << */
1404 [ 0x34 ] = KEY_FASTFORWARD, /* forward >> */
1405 [ 0x35 ] = KEY_PLAY,
1406 [ 0x36 ] = KEY_STOP,
1407 [ 0x37 ] = KEY_RECORD, /* recording */
1408 [ 0x38 ] = KEY_YELLOW, /* yellow key */
1409 [ 0x3b ] = KEY_SELECT, /* top right button */
1410 [ 0x3c ] = KEY_ZOOM, /* full */
1411 [ 0x3d ] = KEY_POWER, /* system power (green button) */
1412};
1413
1414EXPORT_SYMBOL_GPL(ir_codes_hauppauge_new);
1415
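The tables above map raw IR scancodes to Linux input-layer keycodes; a bttv/saa7134-style driver looks a decoded scancode up in one of these ir_codes_* arrays and reports the result through the input subsystem. A minimal sketch of that lookup follows; the helper name and the simplified press/release handling are illustrative only, not the driver's actual code path.

static void ir_report_scancode(struct input_dev *input,
			       IR_KEYTAB_TYPE *keytab, u32 scancode)
{
	/* Entries left at 0 in the table mean "no mapping for this button". */
	unsigned int keycode = keytab[scancode & (IR_KEYTAB_SIZE - 1)];

	if (!keycode)
		return;

	input_report_key(input, keycode, 1);	/* press */
	input_sync(input);
	input_report_key(input, keycode, 0);	/* release */
	input_sync(input);
}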
diff --git a/drivers/media/common/saa7146_core.c b/drivers/media/common/saa7146_core.c
index 04c1938b9c91..8cdd4d265ffa 100644
--- a/drivers/media/common/saa7146_core.c
+++ b/drivers/media/common/saa7146_core.c
@@ -21,7 +21,7 @@
21#include <media/saa7146.h> 21#include <media/saa7146.h>
22 22
23LIST_HEAD(saa7146_devices); 23LIST_HEAD(saa7146_devices);
24DECLARE_MUTEX(saa7146_devices_lock); 24DEFINE_MUTEX(saa7146_devices_lock);
25 25
26static int saa7146_num; 26static int saa7146_num;
27 27
@@ -116,8 +116,7 @@ static struct scatterlist* vmalloc_to_sg(unsigned char *virt, int nr_pages)
116 pg = vmalloc_to_page(virt); 116 pg = vmalloc_to_page(virt);
117 if (NULL == pg) 117 if (NULL == pg)
118 goto err; 118 goto err;
119 if (PageHighMem(pg)) 119 BUG_ON(PageHighMem(pg));
120 BUG();
121 sglist[i].page = pg; 120 sglist[i].page = pg;
122 sglist[i].length = PAGE_SIZE; 121 sglist[i].length = PAGE_SIZE;
123 } 122 }
@@ -402,11 +401,11 @@ static int saa7146_init_one(struct pci_dev *pci, const struct pci_device_id *ent
402 401
403 pci_set_drvdata(pci, dev); 402 pci_set_drvdata(pci, dev);
404 403
405 init_MUTEX(&dev->lock); 404 mutex_init(&dev->lock);
406 spin_lock_init(&dev->int_slock); 405 spin_lock_init(&dev->int_slock);
407 spin_lock_init(&dev->slock); 406 spin_lock_init(&dev->slock);
408 407
409 init_MUTEX(&dev->i2c_lock); 408 mutex_init(&dev->i2c_lock);
410 409
411 dev->module = THIS_MODULE; 410 dev->module = THIS_MODULE;
412 init_waitqueue_head(&dev->i2c_wq); 411 init_waitqueue_head(&dev->i2c_wq);
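The saa7146 changes above (and the bt8xx, flexcop and cinergyT2 hunks that follow) all apply the same kernel-wide conversion: binary semaphores that were only ever used for mutual exclusion become struct mutex, and open-coded "if (x) BUG();" checks become BUG_ON(). A minimal before/after sketch of the locking half of that pattern, using a purely illustrative my_dev structure:

#include <linux/mutex.h>

struct my_dev {
	struct mutex lock;		/* was: struct semaphore lock; */
};

static void my_dev_setup(struct my_dev *dev)
{
	mutex_init(&dev->lock);		/* was: init_MUTEX(&dev->lock); */
}

static int my_dev_do_work(struct my_dev *dev)
{
	/* was: if (down_interruptible(&dev->lock)) return -ERESTARTSYS; */
	if (mutex_lock_interruptible(&dev->lock))
		return -ERESTARTSYS;

	/* ... critical section ... */

	mutex_unlock(&dev->lock);	/* was: up(&dev->lock); */
	return 0;
}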
diff --git a/drivers/media/common/saa7146_fops.c b/drivers/media/common/saa7146_fops.c
index f8cf73ed49ad..3870fa948cc0 100644
--- a/drivers/media/common/saa7146_fops.c
+++ b/drivers/media/common/saa7146_fops.c
@@ -17,18 +17,18 @@ int saa7146_res_get(struct saa7146_fh *fh, unsigned int bit)
17 } 17 }
18 18
19 /* is it free? */ 19 /* is it free? */
20 down(&dev->lock); 20 mutex_lock(&dev->lock);
21 if (vv->resources & bit) { 21 if (vv->resources & bit) {
22 DEB_D(("locked! vv->resources:0x%02x, we want:0x%02x\n",vv->resources,bit)); 22 DEB_D(("locked! vv->resources:0x%02x, we want:0x%02x\n",vv->resources,bit));
23 /* no, someone else uses it */ 23 /* no, someone else uses it */
24 up(&dev->lock); 24 mutex_unlock(&dev->lock);
25 return 0; 25 return 0;
26 } 26 }
27 /* it's free, grab it */ 27 /* it's free, grab it */
28 fh->resources |= bit; 28 fh->resources |= bit;
29 vv->resources |= bit; 29 vv->resources |= bit;
30 DEB_D(("res: get 0x%02x, cur:0x%02x\n",bit,vv->resources)); 30 DEB_D(("res: get 0x%02x, cur:0x%02x\n",bit,vv->resources));
31 up(&dev->lock); 31 mutex_unlock(&dev->lock);
32 return 1; 32 return 1;
33} 33}
34 34
@@ -37,14 +37,13 @@ void saa7146_res_free(struct saa7146_fh *fh, unsigned int bits)
37 struct saa7146_dev *dev = fh->dev; 37 struct saa7146_dev *dev = fh->dev;
38 struct saa7146_vv *vv = dev->vv_data; 38 struct saa7146_vv *vv = dev->vv_data;
39 39
40 if ((fh->resources & bits) != bits) 40 BUG_ON((fh->resources & bits) != bits);
41 BUG();
42 41
43 down(&dev->lock); 42 mutex_lock(&dev->lock);
44 fh->resources &= ~bits; 43 fh->resources &= ~bits;
45 vv->resources &= ~bits; 44 vv->resources &= ~bits;
46 DEB_D(("res: put 0x%02x, cur:0x%02x\n",bits,vv->resources)); 45 DEB_D(("res: put 0x%02x, cur:0x%02x\n",bits,vv->resources));
47 up(&dev->lock); 46 mutex_unlock(&dev->lock);
48} 47}
49 48
50 49
@@ -55,8 +54,7 @@ void saa7146_dma_free(struct saa7146_dev *dev,struct saa7146_buf *buf)
55{ 54{
56 DEB_EE(("dev:%p, buf:%p\n",dev,buf)); 55 DEB_EE(("dev:%p, buf:%p\n",dev,buf));
57 56
58 if (in_interrupt()) 57 BUG_ON(in_interrupt());
59 BUG();
60 58
61 videobuf_waiton(&buf->vb,0,0); 59 videobuf_waiton(&buf->vb,0,0);
62 videobuf_dma_pci_unmap(dev->pci, &buf->vb.dma); 60 videobuf_dma_pci_unmap(dev->pci, &buf->vb.dma);
@@ -204,7 +202,7 @@ static int fops_open(struct inode *inode, struct file *file)
204 202
205 DEB_EE(("inode:%p, file:%p, minor:%d\n",inode,file,minor)); 203 DEB_EE(("inode:%p, file:%p, minor:%d\n",inode,file,minor));
206 204
207 if (down_interruptible(&saa7146_devices_lock)) 205 if (mutex_lock_interruptible(&saa7146_devices_lock))
208 return -ERESTARTSYS; 206 return -ERESTARTSYS;
209 207
210 list_for_each(list,&saa7146_devices) { 208 list_for_each(list,&saa7146_devices) {
@@ -276,7 +274,7 @@ out:
276 kfree(fh); 274 kfree(fh);
277 file->private_data = NULL; 275 file->private_data = NULL;
278 } 276 }
279 up(&saa7146_devices_lock); 277 mutex_unlock(&saa7146_devices_lock);
280 return result; 278 return result;
281} 279}
282 280
@@ -287,7 +285,7 @@ static int fops_release(struct inode *inode, struct file *file)
287 285
288 DEB_EE(("inode:%p, file:%p\n",inode,file)); 286 DEB_EE(("inode:%p, file:%p\n",inode,file));
289 287
290 if (down_interruptible(&saa7146_devices_lock)) 288 if (mutex_lock_interruptible(&saa7146_devices_lock))
291 return -ERESTARTSYS; 289 return -ERESTARTSYS;
292 290
293 if( fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) { 291 if( fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
@@ -303,7 +301,7 @@ static int fops_release(struct inode *inode, struct file *file)
303 file->private_data = NULL; 301 file->private_data = NULL;
304 kfree(fh); 302 kfree(fh);
305 303
306 up(&saa7146_devices_lock); 304 mutex_unlock(&saa7146_devices_lock);
307 305
308 return 0; 306 return 0;
309} 307}
diff --git a/drivers/media/common/saa7146_i2c.c b/drivers/media/common/saa7146_i2c.c
index 8aabdd8fb3c5..d9953f7a8b6b 100644
--- a/drivers/media/common/saa7146_i2c.c
+++ b/drivers/media/common/saa7146_i2c.c
@@ -279,7 +279,7 @@ int saa7146_i2c_transfer(struct saa7146_dev *dev, const struct i2c_msg *msgs, in
279 int address_err = 0; 279 int address_err = 0;
280 int short_delay = 0; 280 int short_delay = 0;
281 281
282 if (down_interruptible (&dev->i2c_lock)) 282 if (mutex_lock_interruptible(&dev->i2c_lock))
283 return -ERESTARTSYS; 283 return -ERESTARTSYS;
284 284
285 for(i=0;i<num;i++) { 285 for(i=0;i<num;i++) {
@@ -366,7 +366,7 @@ out:
366 } 366 }
367 } 367 }
368 368
369 up(&dev->i2c_lock); 369 mutex_unlock(&dev->i2c_lock);
370 return err; 370 return err;
371} 371}
372 372
diff --git a/drivers/media/common/saa7146_vbi.c b/drivers/media/common/saa7146_vbi.c
index 468d3c959075..500bd3f05e16 100644
--- a/drivers/media/common/saa7146_vbi.c
+++ b/drivers/media/common/saa7146_vbi.c
@@ -410,7 +410,7 @@ static int vbi_open(struct saa7146_dev *dev, struct file *file)
410 V4L2_FIELD_SEQ_TB, // FIXME: does this really work? 410 V4L2_FIELD_SEQ_TB, // FIXME: does this really work?
411 sizeof(struct saa7146_buf), 411 sizeof(struct saa7146_buf),
412 file); 412 file);
413 init_MUTEX(&fh->vbi_q.lock); 413 mutex_init(&fh->vbi_q.lock);
414 414
415 init_timer(&fh->vbi_read_timeout); 415 init_timer(&fh->vbi_read_timeout);
416 fh->vbi_read_timeout.function = vbi_read_timeout; 416 fh->vbi_read_timeout.function = vbi_read_timeout;
diff --git a/drivers/media/common/saa7146_video.c b/drivers/media/common/saa7146_video.c
index 7ebac7949df3..6b42713d97f4 100644
--- a/drivers/media/common/saa7146_video.c
+++ b/drivers/media/common/saa7146_video.c
@@ -378,20 +378,20 @@ static int s_fmt(struct saa7146_fh *fh, struct v4l2_format *f)
378 err = try_win(dev,&f->fmt.win); 378 err = try_win(dev,&f->fmt.win);
379 if (0 != err) 379 if (0 != err)
380 return err; 380 return err;
381 down(&dev->lock); 381 mutex_lock(&dev->lock);
382 fh->ov.win = f->fmt.win; 382 fh->ov.win = f->fmt.win;
383 fh->ov.nclips = f->fmt.win.clipcount; 383 fh->ov.nclips = f->fmt.win.clipcount;
384 if (fh->ov.nclips > 16) 384 if (fh->ov.nclips > 16)
385 fh->ov.nclips = 16; 385 fh->ov.nclips = 16;
386 if (copy_from_user(fh->ov.clips,f->fmt.win.clips,sizeof(struct v4l2_clip)*fh->ov.nclips)) { 386 if (copy_from_user(fh->ov.clips,f->fmt.win.clips,sizeof(struct v4l2_clip)*fh->ov.nclips)) {
387 up(&dev->lock); 387 mutex_unlock(&dev->lock);
388 return -EFAULT; 388 return -EFAULT;
389 } 389 }
390 390
391 /* fh->ov.fh is used to indicate that we have valid overlay informations, too */ 391 /* fh->ov.fh is used to indicate that we have valid overlay informations, too */
392 fh->ov.fh = fh; 392 fh->ov.fh = fh;
393 393
394 up(&dev->lock); 394 mutex_unlock(&dev->lock);
395 395
396 /* check if our current overlay is active */ 396 /* check if our current overlay is active */
397 if (IS_OVERLAY_ACTIVE(fh) != 0) { 397 if (IS_OVERLAY_ACTIVE(fh) != 0) {
@@ -516,7 +516,7 @@ static int set_control(struct saa7146_fh *fh, struct v4l2_control *c)
516 return -EINVAL; 516 return -EINVAL;
517 } 517 }
518 518
519 down(&dev->lock); 519 mutex_lock(&dev->lock);
520 520
521 switch (ctrl->type) { 521 switch (ctrl->type) {
522 case V4L2_CTRL_TYPE_BOOLEAN: 522 case V4L2_CTRL_TYPE_BOOLEAN:
@@ -560,7 +560,7 @@ static int set_control(struct saa7146_fh *fh, struct v4l2_control *c)
560 /* fixme: we can support changing VFLIP and HFLIP here... */ 560 /* fixme: we can support changing VFLIP and HFLIP here... */
561 if (IS_CAPTURE_ACTIVE(fh) != 0) { 561 if (IS_CAPTURE_ACTIVE(fh) != 0) {
562 DEB_D(("V4L2_CID_HFLIP while active capture.\n")); 562 DEB_D(("V4L2_CID_HFLIP while active capture.\n"));
563 up(&dev->lock); 563 mutex_unlock(&dev->lock);
564 return -EINVAL; 564 return -EINVAL;
565 } 565 }
566 vv->hflip = c->value; 566 vv->hflip = c->value;
@@ -568,7 +568,7 @@ static int set_control(struct saa7146_fh *fh, struct v4l2_control *c)
568 case V4L2_CID_VFLIP: 568 case V4L2_CID_VFLIP:
569 if (IS_CAPTURE_ACTIVE(fh) != 0) { 569 if (IS_CAPTURE_ACTIVE(fh) != 0) {
570 DEB_D(("V4L2_CID_VFLIP while active capture.\n")); 570 DEB_D(("V4L2_CID_VFLIP while active capture.\n"));
571 up(&dev->lock); 571 mutex_unlock(&dev->lock);
572 return -EINVAL; 572 return -EINVAL;
573 } 573 }
574 vv->vflip = c->value; 574 vv->vflip = c->value;
@@ -577,7 +577,7 @@ static int set_control(struct saa7146_fh *fh, struct v4l2_control *c)
577 return -EINVAL; 577 return -EINVAL;
578 } 578 }
579 } 579 }
580 up(&dev->lock); 580 mutex_unlock(&dev->lock);
581 581
582 if (IS_OVERLAY_ACTIVE(fh) != 0) { 582 if (IS_OVERLAY_ACTIVE(fh) != 0) {
583 saa7146_stop_preview(fh); 583 saa7146_stop_preview(fh);
@@ -939,7 +939,7 @@ int saa7146_video_do_ioctl(struct inode *inode, struct file *file, unsigned int
939 } 939 }
940 } 940 }
941 941
942 down(&dev->lock); 942 mutex_lock(&dev->lock);
943 943
944 /* ok, accept it */ 944 /* ok, accept it */
945 vv->ov_fb = *fb; 945 vv->ov_fb = *fb;
@@ -948,7 +948,7 @@ int saa7146_video_do_ioctl(struct inode *inode, struct file *file, unsigned int
948 vv->ov_fb.fmt.bytesperline = 948 vv->ov_fb.fmt.bytesperline =
949 vv->ov_fb.fmt.width*fmt->depth/8; 949 vv->ov_fb.fmt.width*fmt->depth/8;
950 950
951 up(&dev->lock); 951 mutex_unlock(&dev->lock);
952 952
953 return 0; 953 return 0;
954 } 954 }
@@ -1086,7 +1086,7 @@ int saa7146_video_do_ioctl(struct inode *inode, struct file *file, unsigned int
1086 } 1086 }
1087 } 1087 }
1088 1088
1089 down(&dev->lock); 1089 mutex_lock(&dev->lock);
1090 1090
1091 for(i = 0; i < dev->ext_vv_data->num_stds; i++) 1091 for(i = 0; i < dev->ext_vv_data->num_stds; i++)
1092 if (*id & dev->ext_vv_data->stds[i].id) 1092 if (*id & dev->ext_vv_data->stds[i].id)
@@ -1098,7 +1098,7 @@ int saa7146_video_do_ioctl(struct inode *inode, struct file *file, unsigned int
1098 found = 1; 1098 found = 1;
1099 } 1099 }
1100 1100
1101 up(&dev->lock); 1101 mutex_unlock(&dev->lock);
1102 1102
1103 if (vv->ov_suspend != NULL) { 1103 if (vv->ov_suspend != NULL) {
1104 saa7146_start_preview(vv->ov_suspend); 1104 saa7146_start_preview(vv->ov_suspend);
@@ -1201,11 +1201,11 @@ int saa7146_video_do_ioctl(struct inode *inode, struct file *file, unsigned int
1201 DEB_D(("VIDIOCGMBUF \n")); 1201 DEB_D(("VIDIOCGMBUF \n"));
1202 1202
1203 q = &fh->video_q; 1203 q = &fh->video_q;
1204 down(&q->lock); 1204 mutex_lock(&q->lock);
1205 err = videobuf_mmap_setup(q,gbuffers,gbufsize, 1205 err = videobuf_mmap_setup(q,gbuffers,gbufsize,
1206 V4L2_MEMORY_MMAP); 1206 V4L2_MEMORY_MMAP);
1207 if (err < 0) { 1207 if (err < 0) {
1208 up(&q->lock); 1208 mutex_unlock(&q->lock);
1209 return err; 1209 return err;
1210 } 1210 }
1211 memset(mbuf,0,sizeof(*mbuf)); 1211 memset(mbuf,0,sizeof(*mbuf));
@@ -1213,7 +1213,7 @@ int saa7146_video_do_ioctl(struct inode *inode, struct file *file, unsigned int
1213 mbuf->size = gbuffers * gbufsize; 1213 mbuf->size = gbuffers * gbufsize;
1214 for (i = 0; i < gbuffers; i++) 1214 for (i = 0; i < gbuffers; i++)
1215 mbuf->offsets[i] = i * gbufsize; 1215 mbuf->offsets[i] = i * gbufsize;
1216 up(&q->lock); 1216 mutex_unlock(&q->lock);
1217 return 0; 1217 return 0;
1218 } 1218 }
1219 default: 1219 default:
@@ -1414,7 +1414,7 @@ static int video_open(struct saa7146_dev *dev, struct file *file)
1414 sizeof(struct saa7146_buf), 1414 sizeof(struct saa7146_buf),
1415 file); 1415 file);
1416 1416
1417 init_MUTEX(&fh->video_q.lock); 1417 mutex_init(&fh->video_q.lock);
1418 1418
1419 return 0; 1419 return 0;
1420} 1420}
diff --git a/drivers/media/dvb/b2c2/flexcop-common.h b/drivers/media/dvb/b2c2/flexcop-common.h
index 7d7e1613c5a7..b3dd0603cd92 100644
--- a/drivers/media/dvb/b2c2/flexcop-common.h
+++ b/drivers/media/dvb/b2c2/flexcop-common.h
@@ -10,6 +10,7 @@
10 10
11#include <linux/config.h> 11#include <linux/config.h>
12#include <linux/pci.h> 12#include <linux/pci.h>
13#include <linux/mutex.h>
13 14
14#include "flexcop-reg.h" 15#include "flexcop-reg.h"
15 16
@@ -73,8 +74,7 @@ struct flexcop_device {
73 int (*fe_sleep) (struct dvb_frontend *); 74 int (*fe_sleep) (struct dvb_frontend *);
74 75
75 struct i2c_adapter i2c_adap; 76 struct i2c_adapter i2c_adap;
76 struct semaphore i2c_sem; 77 struct mutex i2c_mutex;
77
78 struct module *owner; 78 struct module *owner;
79 79
80 /* options and status */ 80 /* options and status */
diff --git a/drivers/media/dvb/b2c2/flexcop-i2c.c b/drivers/media/dvb/b2c2/flexcop-i2c.c
index 56495cb6cd02..e0bd2d8f0f0c 100644
--- a/drivers/media/dvb/b2c2/flexcop-i2c.c
+++ b/drivers/media/dvb/b2c2/flexcop-i2c.c
@@ -135,7 +135,7 @@ static int flexcop_master_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msgs
135 struct flexcop_device *fc = i2c_get_adapdata(i2c_adap); 135 struct flexcop_device *fc = i2c_get_adapdata(i2c_adap);
136 int i, ret = 0; 136 int i, ret = 0;
137 137
138 if (down_interruptible(&fc->i2c_sem)) 138 if (mutex_lock_interruptible(&fc->i2c_mutex))
139 return -ERESTARTSYS; 139 return -ERESTARTSYS;
140 140
141 /* reading */ 141 /* reading */
@@ -161,7 +161,7 @@ static int flexcop_master_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msgs
161 else 161 else
162 ret = num; 162 ret = num;
163 163
164 up(&fc->i2c_sem); 164 mutex_unlock(&fc->i2c_mutex);
165 165
166 return ret; 166 return ret;
167} 167}
@@ -180,7 +180,7 @@ int flexcop_i2c_init(struct flexcop_device *fc)
180{ 180{
181 int ret; 181 int ret;
182 182
183 sema_init(&fc->i2c_sem,1); 183 mutex_init(&fc->i2c_mutex);
184 184
185 memset(&fc->i2c_adap, 0, sizeof(struct i2c_adapter)); 185 memset(&fc->i2c_adap, 0, sizeof(struct i2c_adapter));
186 strncpy(fc->i2c_adap.name, "B2C2 FlexCop device",I2C_NAME_SIZE); 186 strncpy(fc->i2c_adap.name, "B2C2 FlexCop device",I2C_NAME_SIZE);
diff --git a/drivers/media/dvb/bt8xx/Makefile b/drivers/media/dvb/bt8xx/Makefile
index d188e4c670b5..9d197efb481d 100644
--- a/drivers/media/dvb/bt8xx/Makefile
+++ b/drivers/media/dvb/bt8xx/Makefile
@@ -1,3 +1,3 @@
1obj-$(CONFIG_DVB_BT8XX) += bt878.o dvb-bt8xx.o dst.o dst_ca.o 1obj-$(CONFIG_DVB_BT8XX) += bt878.o dvb-bt8xx.o dst.o dst_ca.o
2 2
3EXTRA_CFLAGS = -Idrivers/media/dvb/dvb-core/ -Idrivers/media/video -Idrivers/media/dvb/frontends 3EXTRA_CFLAGS = -Idrivers/media/dvb/dvb-core/ -Idrivers/media/video/bt8xx -Idrivers/media/dvb/frontends
diff --git a/drivers/media/dvb/bt8xx/bt878.c b/drivers/media/dvb/bt8xx/bt878.c
index 356f447ee2ab..5500f8a0ffe2 100644
--- a/drivers/media/dvb/bt8xx/bt878.c
+++ b/drivers/media/dvb/bt8xx/bt878.c
@@ -344,7 +344,7 @@ bt878_device_control(struct bt878 *bt, unsigned int cmd, union dst_gpio_packet *
344 int retval; 344 int retval;
345 345
346 retval = 0; 346 retval = 0;
347 if (down_interruptible (&bt->gpio_lock)) 347 if (mutex_lock_interruptible(&bt->gpio_lock))
348 return -ERESTARTSYS; 348 return -ERESTARTSYS;
349 /* special gpio signal */ 349 /* special gpio signal */
350 switch (cmd) { 350 switch (cmd) {
@@ -375,7 +375,7 @@ bt878_device_control(struct bt878 *bt, unsigned int cmd, union dst_gpio_packet *
375 retval = -EINVAL; 375 retval = -EINVAL;
376 break; 376 break;
377 } 377 }
378 up(&bt->gpio_lock); 378 mutex_unlock(&bt->gpio_lock);
379 return retval; 379 return retval;
380} 380}
381 381
diff --git a/drivers/media/dvb/bt8xx/bt878.h b/drivers/media/dvb/bt8xx/bt878.h
index 9faf93770d08..f685bc129609 100644
--- a/drivers/media/dvb/bt8xx/bt878.h
+++ b/drivers/media/dvb/bt8xx/bt878.h
@@ -25,6 +25,8 @@
25#include <linux/pci.h> 25#include <linux/pci.h>
26#include <linux/sched.h> 26#include <linux/sched.h>
27#include <linux/spinlock.h> 27#include <linux/spinlock.h>
28#include <linux/mutex.h>
29
28#include "bt848.h" 30#include "bt848.h"
29#include "bttv.h" 31#include "bttv.h"
30 32
@@ -108,7 +110,7 @@ struct cards {
108extern int bt878_num; 110extern int bt878_num;
109 111
110struct bt878 { 112struct bt878 {
111 struct semaphore gpio_lock; 113 struct mutex gpio_lock;
112 unsigned int nr; 114 unsigned int nr;
113 unsigned int bttv_nr; 115 unsigned int bttv_nr;
114 struct i2c_adapter *adapter; 116 struct i2c_adapter *adapter;
diff --git a/drivers/media/dvb/bt8xx/dst.c b/drivers/media/dvb/bt8xx/dst.c
index 0310e3dd07e6..1cfa5e5035d8 100644
--- a/drivers/media/dvb/bt8xx/dst.c
+++ b/drivers/media/dvb/bt8xx/dst.c
@@ -910,7 +910,7 @@ static int dst_get_device_id(struct dst_state *state)
910 910
911static int dst_probe(struct dst_state *state) 911static int dst_probe(struct dst_state *state)
912{ 912{
913 sema_init(&state->dst_mutex, 1); 913 mutex_init(&state->dst_mutex);
914 if ((rdc_8820_reset(state)) < 0) { 914 if ((rdc_8820_reset(state)) < 0) {
915 dprintk(verbose, DST_ERROR, 1, "RDC 8820 RESET Failed."); 915 dprintk(verbose, DST_ERROR, 1, "RDC 8820 RESET Failed.");
916 return -1; 916 return -1;
@@ -962,7 +962,7 @@ int dst_command(struct dst_state *state, u8 *data, u8 len)
962{ 962{
963 u8 reply; 963 u8 reply;
964 964
965 down(&state->dst_mutex); 965 mutex_lock(&state->dst_mutex);
966 if ((dst_comm_init(state)) < 0) { 966 if ((dst_comm_init(state)) < 0) {
967 dprintk(verbose, DST_NOTICE, 1, "DST Communication Initialization Failed."); 967 dprintk(verbose, DST_NOTICE, 1, "DST Communication Initialization Failed.");
968 goto error; 968 goto error;
@@ -1013,11 +1013,11 @@ int dst_command(struct dst_state *state, u8 *data, u8 len)
1013 dprintk(verbose, DST_INFO, 1, "checksum failure"); 1013 dprintk(verbose, DST_INFO, 1, "checksum failure");
1014 goto error; 1014 goto error;
1015 } 1015 }
1016 up(&state->dst_mutex); 1016 mutex_unlock(&state->dst_mutex);
1017 return 0; 1017 return 0;
1018 1018
1019error: 1019error:
1020 up(&state->dst_mutex); 1020 mutex_unlock(&state->dst_mutex);
1021 return -EIO; 1021 return -EIO;
1022 1022
1023} 1023}
@@ -1128,7 +1128,7 @@ static int dst_write_tuna(struct dvb_frontend *fe)
1128 dst_set_voltage(fe, SEC_VOLTAGE_13); 1128 dst_set_voltage(fe, SEC_VOLTAGE_13);
1129 } 1129 }
1130 state->diseq_flags &= ~(HAS_LOCK | ATTEMPT_TUNE); 1130 state->diseq_flags &= ~(HAS_LOCK | ATTEMPT_TUNE);
1131 down(&state->dst_mutex); 1131 mutex_lock(&state->dst_mutex);
1132 if ((dst_comm_init(state)) < 0) { 1132 if ((dst_comm_init(state)) < 0) {
1133 dprintk(verbose, DST_DEBUG, 1, "DST Communication initialization failed."); 1133 dprintk(verbose, DST_DEBUG, 1, "DST Communication initialization failed.");
1134 goto error; 1134 goto error;
@@ -1160,11 +1160,11 @@ static int dst_write_tuna(struct dvb_frontend *fe)
1160 state->diseq_flags |= ATTEMPT_TUNE; 1160 state->diseq_flags |= ATTEMPT_TUNE;
1161 retval = dst_get_tuna(state); 1161 retval = dst_get_tuna(state);
1162werr: 1162werr:
1163 up(&state->dst_mutex); 1163 mutex_unlock(&state->dst_mutex);
1164 return retval; 1164 return retval;
1165 1165
1166error: 1166error:
1167 up(&state->dst_mutex); 1167 mutex_unlock(&state->dst_mutex);
1168 return -EIO; 1168 return -EIO;
1169} 1169}
1170 1170
diff --git a/drivers/media/dvb/bt8xx/dst_ca.c b/drivers/media/dvb/bt8xx/dst_ca.c
index c650b4bf7f5f..f6b49a801eba 100644
--- a/drivers/media/dvb/bt8xx/dst_ca.c
+++ b/drivers/media/dvb/bt8xx/dst_ca.c
@@ -81,7 +81,7 @@ static int dst_ci_command(struct dst_state* state, u8 * data, u8 *ca_string, u8
81{ 81{
82 u8 reply; 82 u8 reply;
83 83
84 down(&state->dst_mutex); 84 mutex_lock(&state->dst_mutex);
85 dst_comm_init(state); 85 dst_comm_init(state);
86 msleep(65); 86 msleep(65);
87 87
@@ -110,11 +110,11 @@ static int dst_ci_command(struct dst_state* state, u8 * data, u8 *ca_string, u8
110 goto error; 110 goto error;
111 } 111 }
112 } 112 }
113 up(&state->dst_mutex); 113 mutex_unlock(&state->dst_mutex);
114 return 0; 114 return 0;
115 115
116error: 116error:
117 up(&state->dst_mutex); 117 mutex_unlock(&state->dst_mutex);
118 return -EIO; 118 return -EIO;
119} 119}
120 120
diff --git a/drivers/media/dvb/bt8xx/dst_common.h b/drivers/media/dvb/bt8xx/dst_common.h
index 81557f38fe38..51d4e043716c 100644
--- a/drivers/media/dvb/bt8xx/dst_common.h
+++ b/drivers/media/dvb/bt8xx/dst_common.h
@@ -25,6 +25,7 @@
25#include <linux/smp_lock.h> 25#include <linux/smp_lock.h>
26#include <linux/dvb/frontend.h> 26#include <linux/dvb/frontend.h>
27#include <linux/device.h> 27#include <linux/device.h>
28#include <linux/mutex.h>
28#include "bt878.h" 29#include "bt878.h"
29 30
30#include "dst_ca.h" 31#include "dst_ca.h"
@@ -121,7 +122,7 @@ struct dst_state {
121 u8 vendor[8]; 122 u8 vendor[8];
122 u8 board_info[8]; 123 u8 board_info[8];
123 124
124 struct semaphore dst_mutex; 125 struct mutex dst_mutex;
125}; 126};
126 127
127struct dst_types { 128struct dst_types {
diff --git a/drivers/media/dvb/bt8xx/dvb-bt8xx.c b/drivers/media/dvb/bt8xx/dvb-bt8xx.c
index ea27b15007e9..baa8227ef87c 100644
--- a/drivers/media/dvb/bt8xx/dvb-bt8xx.c
+++ b/drivers/media/dvb/bt8xx/dvb-bt8xx.c
@@ -76,13 +76,13 @@ static int dvb_bt8xx_start_feed(struct dvb_demux_feed *dvbdmxfeed)
76 if (!dvbdmx->dmx.frontend) 76 if (!dvbdmx->dmx.frontend)
77 return -EINVAL; 77 return -EINVAL;
78 78
79 down(&card->lock); 79 mutex_lock(&card->lock);
80 card->nfeeds++; 80 card->nfeeds++;
81 rc = card->nfeeds; 81 rc = card->nfeeds;
82 if (card->nfeeds == 1) 82 if (card->nfeeds == 1)
83 bt878_start(card->bt, card->gpio_mode, 83 bt878_start(card->bt, card->gpio_mode,
84 card->op_sync_orin, card->irq_err_ignore); 84 card->op_sync_orin, card->irq_err_ignore);
85 up(&card->lock); 85 mutex_unlock(&card->lock);
86 return rc; 86 return rc;
87} 87}
88 88
@@ -96,11 +96,11 @@ static int dvb_bt8xx_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
96 if (!dvbdmx->dmx.frontend) 96 if (!dvbdmx->dmx.frontend)
97 return -EINVAL; 97 return -EINVAL;
98 98
99 down(&card->lock); 99 mutex_lock(&card->lock);
100 card->nfeeds--; 100 card->nfeeds--;
101 if (card->nfeeds == 0) 101 if (card->nfeeds == 0)
102 bt878_stop(card->bt); 102 bt878_stop(card->bt);
103 up(&card->lock); 103 mutex_unlock(&card->lock);
104 104
105 return 0; 105 return 0;
106} 106}
@@ -239,6 +239,20 @@ static int cx24108_pll_set(struct dvb_frontend* fe, struct dvb_frontend_paramete
239 239
240static int pinnsat_pll_init(struct dvb_frontend* fe) 240static int pinnsat_pll_init(struct dvb_frontend* fe)
241{ 241{
242 struct dvb_bt8xx_card *card = fe->dvb->priv;
243
244 bttv_gpio_enable(card->bttv_nr, 1, 1); /* output */
245 bttv_write_gpio(card->bttv_nr, 1, 1); /* relay on */
246
247 return 0;
248}
249
250static int pinnsat_pll_sleep(struct dvb_frontend* fe)
251{
252 struct dvb_bt8xx_card *card = fe->dvb->priv;
253
254 bttv_write_gpio(card->bttv_nr, 1, 0); /* relay off */
255
242 return 0; 256 return 0;
243} 257}
244 258
@@ -246,6 +260,7 @@ static struct cx24110_config pctvsat_config = {
246 .demod_address = 0x55, 260 .demod_address = 0x55,
247 .pll_init = pinnsat_pll_init, 261 .pll_init = pinnsat_pll_init,
248 .pll_set = cx24108_pll_set, 262 .pll_set = cx24108_pll_set,
263 .pll_sleep = pinnsat_pll_sleep,
249}; 264};
250 265
251static int microtune_mt7202dtf_pll_set(struct dvb_frontend* fe, struct dvb_frontend_parameters* params) 266static int microtune_mt7202dtf_pll_set(struct dvb_frontend* fe, struct dvb_frontend_parameters* params)
@@ -788,7 +803,7 @@ static int dvb_bt8xx_probe(struct bttv_sub_device *sub)
788 if (!(card = kzalloc(sizeof(struct dvb_bt8xx_card), GFP_KERNEL))) 803 if (!(card = kzalloc(sizeof(struct dvb_bt8xx_card), GFP_KERNEL)))
789 return -ENOMEM; 804 return -ENOMEM;
790 805
791 init_MUTEX(&card->lock); 806 mutex_init(&card->lock);
792 card->bttv_nr = sub->core->nr; 807 card->bttv_nr = sub->core->nr;
793 strncpy(card->card_name, sub->core->name, sizeof(sub->core->name)); 808 strncpy(card->card_name, sub->core->name, sizeof(sub->core->name));
794 card->i2c_adapter = &sub->core->i2c_adap; 809 card->i2c_adapter = &sub->core->i2c_adap;
@@ -798,14 +813,14 @@ static int dvb_bt8xx_probe(struct bttv_sub_device *sub)
798 card->gpio_mode = 0x0400c060; 813 card->gpio_mode = 0x0400c060;
799 /* should be: BT878_A_GAIN=0,BT878_A_PWRDN,BT878_DA_DPM,BT878_DA_SBR, 814 /* should be: BT878_A_GAIN=0,BT878_A_PWRDN,BT878_DA_DPM,BT878_DA_SBR,
800 BT878_DA_IOM=1,BT878_DA_APP to enable serial highspeed mode. */ 815 BT878_DA_IOM=1,BT878_DA_APP to enable serial highspeed mode. */
801 card->op_sync_orin = 0; 816 card->op_sync_orin = BT878_RISC_SYNC_MASK;
802 card->irq_err_ignore = 0; 817 card->irq_err_ignore = BT878_AFBUS | BT878_AFDSR;
803 break; 818 break;
804 819
805 case BTTV_BOARD_DVICO_DVBT_LITE: 820 case BTTV_BOARD_DVICO_DVBT_LITE:
806 card->gpio_mode = 0x0400C060; 821 card->gpio_mode = 0x0400C060;
807 card->op_sync_orin = 0; 822 card->op_sync_orin = BT878_RISC_SYNC_MASK;
808 card->irq_err_ignore = 0; 823 card->irq_err_ignore = BT878_AFBUS | BT878_AFDSR;
809 /* 26, 15, 14, 6, 5 824 /* 26, 15, 14, 6, 5
810 * A_PWRDN DA_DPM DA_SBR DA_IOM_DA 825 * A_PWRDN DA_DPM DA_SBR DA_IOM_DA
811 * DA_APP(parallel) */ 826 * DA_APP(parallel) */
@@ -820,15 +835,15 @@ static int dvb_bt8xx_probe(struct bttv_sub_device *sub)
820 case BTTV_BOARD_NEBULA_DIGITV: 835 case BTTV_BOARD_NEBULA_DIGITV:
821 case BTTV_BOARD_AVDVBT_761: 836 case BTTV_BOARD_AVDVBT_761:
822 card->gpio_mode = (1 << 26) | (1 << 14) | (1 << 5); 837 card->gpio_mode = (1 << 26) | (1 << 14) | (1 << 5);
823 card->op_sync_orin = 0; 838 card->op_sync_orin = BT878_RISC_SYNC_MASK;
824 card->irq_err_ignore = 0; 839 card->irq_err_ignore = BT878_AFBUS | BT878_AFDSR;
825 /* A_PWRDN DA_SBR DA_APP (high speed serial) */ 840 /* A_PWRDN DA_SBR DA_APP (high speed serial) */
826 break; 841 break;
827 842
828 case BTTV_BOARD_AVDVBT_771: //case 0x07711461: 843 case BTTV_BOARD_AVDVBT_771: //case 0x07711461:
829 card->gpio_mode = 0x0400402B; 844 card->gpio_mode = 0x0400402B;
830 card->op_sync_orin = BT878_RISC_SYNC_MASK; 845 card->op_sync_orin = BT878_RISC_SYNC_MASK;
831 card->irq_err_ignore = 0; 846 card->irq_err_ignore = BT878_AFBUS | BT878_AFDSR;
832 /* A_PWRDN DA_SBR DA_APP[0] PKTP=10 RISC_ENABLE FIFO_ENABLE*/ 847 /* A_PWRDN DA_SBR DA_APP[0] PKTP=10 RISC_ENABLE FIFO_ENABLE*/
833 break; 848 break;
834 849
@@ -852,8 +867,8 @@ static int dvb_bt8xx_probe(struct bttv_sub_device *sub)
852 867
853 case BTTV_BOARD_PC_HDTV: 868 case BTTV_BOARD_PC_HDTV:
854 card->gpio_mode = 0x0100EC7B; 869 card->gpio_mode = 0x0100EC7B;
855 card->op_sync_orin = 0; 870 card->op_sync_orin = BT878_RISC_SYNC_MASK;
856 card->irq_err_ignore = 0; 871 card->irq_err_ignore = BT878_AFBUS | BT878_AFDSR;
857 break; 872 break;
858 873
859 default: 874 default:
@@ -881,7 +896,7 @@ static int dvb_bt8xx_probe(struct bttv_sub_device *sub)
881 return -EFAULT; 896 return -EFAULT;
882 } 897 }
883 898
884 init_MUTEX(&card->bt->gpio_lock); 899 mutex_init(&card->bt->gpio_lock);
885 card->bt->bttv_nr = sub->core->nr; 900 card->bt->bttv_nr = sub->core->nr;
886 901
887 if ( (ret = dvb_bt8xx_load_card(card, sub->core->type)) ) { 902 if ( (ret = dvb_bt8xx_load_card(card, sub->core->type)) ) {
diff --git a/drivers/media/dvb/bt8xx/dvb-bt8xx.h b/drivers/media/dvb/bt8xx/dvb-bt8xx.h
index cf035a80361c..00dd9fa54c82 100644
--- a/drivers/media/dvb/bt8xx/dvb-bt8xx.h
+++ b/drivers/media/dvb/bt8xx/dvb-bt8xx.h
@@ -26,6 +26,7 @@
26#define DVB_BT8XX_H 26#define DVB_BT8XX_H
27 27
28#include <linux/i2c.h> 28#include <linux/i2c.h>
29#include <linux/mutex.h>
29#include "dvbdev.h" 30#include "dvbdev.h"
30#include "dvb_net.h" 31#include "dvb_net.h"
31#include "bttv.h" 32#include "bttv.h"
@@ -38,7 +39,7 @@
38#include "lgdt330x.h" 39#include "lgdt330x.h"
39 40
40struct dvb_bt8xx_card { 41struct dvb_bt8xx_card {
41 struct semaphore lock; 42 struct mutex lock;
42 int nfeeds; 43 int nfeeds;
43 char card_name[32]; 44 char card_name[32];
44 struct dvb_adapter dvb_adapter; 45 struct dvb_adapter dvb_adapter;
diff --git a/drivers/media/dvb/cinergyT2/cinergyT2.c b/drivers/media/dvb/cinergyT2/cinergyT2.c
index c4b4c5b6b7c8..71b575dc22bd 100644
--- a/drivers/media/dvb/cinergyT2/cinergyT2.c
+++ b/drivers/media/dvb/cinergyT2/cinergyT2.c
@@ -30,6 +30,7 @@
30#include <linux/pci.h> 30#include <linux/pci.h>
31#include <linux/input.h> 31#include <linux/input.h>
32#include <linux/dvb/frontend.h> 32#include <linux/dvb/frontend.h>
33#include <linux/mutex.h>
33 34
34#include "dmxdev.h" 35#include "dmxdev.h"
35#include "dvb_demux.h" 36#include "dvb_demux.h"
@@ -116,7 +117,7 @@ static struct dvb_frontend_info cinergyt2_fe_info = {
116struct cinergyt2 { 117struct cinergyt2 {
117 struct dvb_demux demux; 118 struct dvb_demux demux;
118 struct usb_device *udev; 119 struct usb_device *udev;
119 struct semaphore sem; 120 struct mutex sem;
120 struct dvb_adapter adapter; 121 struct dvb_adapter adapter;
121 struct dvb_device *fedev; 122 struct dvb_device *fedev;
122 struct dmxdev dmxdev; 123 struct dmxdev dmxdev;
@@ -345,14 +346,14 @@ static int cinergyt2_start_feed(struct dvb_demux_feed *dvbdmxfeed)
345 struct dvb_demux *demux = dvbdmxfeed->demux; 346 struct dvb_demux *demux = dvbdmxfeed->demux;
346 struct cinergyt2 *cinergyt2 = demux->priv; 347 struct cinergyt2 *cinergyt2 = demux->priv;
347 348
348 if (cinergyt2->disconnect_pending || down_interruptible(&cinergyt2->sem)) 349 if (cinergyt2->disconnect_pending || mutex_lock_interruptible(&cinergyt2->sem))
349 return -ERESTARTSYS; 350 return -ERESTARTSYS;
350 351
351 if (cinergyt2->streaming == 0) 352 if (cinergyt2->streaming == 0)
352 cinergyt2_start_stream_xfer(cinergyt2); 353 cinergyt2_start_stream_xfer(cinergyt2);
353 354
354 cinergyt2->streaming++; 355 cinergyt2->streaming++;
355 up(&cinergyt2->sem); 356 mutex_unlock(&cinergyt2->sem);
356 return 0; 357 return 0;
357} 358}
358 359
@@ -361,13 +362,13 @@ static int cinergyt2_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
361 struct dvb_demux *demux = dvbdmxfeed->demux; 362 struct dvb_demux *demux = dvbdmxfeed->demux;
362 struct cinergyt2 *cinergyt2 = demux->priv; 363 struct cinergyt2 *cinergyt2 = demux->priv;
363 364
364 if (cinergyt2->disconnect_pending || down_interruptible(&cinergyt2->sem)) 365 if (cinergyt2->disconnect_pending || mutex_lock_interruptible(&cinergyt2->sem))
365 return -ERESTARTSYS; 366 return -ERESTARTSYS;
366 367
367 if (--cinergyt2->streaming == 0) 368 if (--cinergyt2->streaming == 0)
368 cinergyt2_stop_stream_xfer(cinergyt2); 369 cinergyt2_stop_stream_xfer(cinergyt2);
369 370
370 up(&cinergyt2->sem); 371 mutex_unlock(&cinergyt2->sem);
371 return 0; 372 return 0;
372} 373}
373 374
@@ -483,11 +484,11 @@ static int cinergyt2_open (struct inode *inode, struct file *file)
483 struct cinergyt2 *cinergyt2 = dvbdev->priv; 484 struct cinergyt2 *cinergyt2 = dvbdev->priv;
484 int err = -ERESTARTSYS; 485 int err = -ERESTARTSYS;
485 486
486 if (cinergyt2->disconnect_pending || down_interruptible(&cinergyt2->sem)) 487 if (cinergyt2->disconnect_pending || mutex_lock_interruptible(&cinergyt2->sem))
487 return -ERESTARTSYS; 488 return -ERESTARTSYS;
488 489
489 if ((err = dvb_generic_open(inode, file))) { 490 if ((err = dvb_generic_open(inode, file))) {
490 up(&cinergyt2->sem); 491 mutex_unlock(&cinergyt2->sem);
491 return err; 492 return err;
492 } 493 }
493 494
@@ -499,12 +500,15 @@ static int cinergyt2_open (struct inode *inode, struct file *file)
499 500
500 atomic_inc(&cinergyt2->inuse); 501 atomic_inc(&cinergyt2->inuse);
501 502
502 up(&cinergyt2->sem); 503 mutex_unlock(&cinergyt2->sem);
503 return 0; 504 return 0;
504} 505}
505 506
506static void cinergyt2_unregister(struct cinergyt2 *cinergyt2) 507static void cinergyt2_unregister(struct cinergyt2 *cinergyt2)
507{ 508{
509 dvb_net_release(&cinergyt2->dvbnet);
510 dvb_dmxdev_release(&cinergyt2->dmxdev);
511 dvb_dmx_release(&cinergyt2->demux);
508 dvb_unregister_device(cinergyt2->fedev); 512 dvb_unregister_device(cinergyt2->fedev);
509 dvb_unregister_adapter(&cinergyt2->adapter); 513 dvb_unregister_adapter(&cinergyt2->adapter);
510 514
@@ -517,7 +521,7 @@ static int cinergyt2_release (struct inode *inode, struct file *file)
517 struct dvb_device *dvbdev = file->private_data; 521 struct dvb_device *dvbdev = file->private_data;
518 struct cinergyt2 *cinergyt2 = dvbdev->priv; 522 struct cinergyt2 *cinergyt2 = dvbdev->priv;
519 523
520 if (down_interruptible(&cinergyt2->sem)) 524 if (mutex_lock_interruptible(&cinergyt2->sem))
521 return -ERESTARTSYS; 525 return -ERESTARTSYS;
522 526
523 if (!cinergyt2->disconnect_pending && (file->f_flags & O_ACCMODE) != O_RDONLY) { 527 if (!cinergyt2->disconnect_pending && (file->f_flags & O_ACCMODE) != O_RDONLY) {
@@ -526,7 +530,7 @@ static int cinergyt2_release (struct inode *inode, struct file *file)
526 cinergyt2_sleep(cinergyt2, 1); 530 cinergyt2_sleep(cinergyt2, 1);
527 } 531 }
528 532
529 up(&cinergyt2->sem); 533 mutex_unlock(&cinergyt2->sem);
530 534
531 if (atomic_dec_and_test(&cinergyt2->inuse) && cinergyt2->disconnect_pending) { 535 if (atomic_dec_and_test(&cinergyt2->inuse) && cinergyt2->disconnect_pending) {
532 warn("delayed unregister in release"); 536 warn("delayed unregister in release");
@@ -541,12 +545,12 @@ static unsigned int cinergyt2_poll (struct file *file, struct poll_table_struct
541 struct dvb_device *dvbdev = file->private_data; 545 struct dvb_device *dvbdev = file->private_data;
542 struct cinergyt2 *cinergyt2 = dvbdev->priv; 546 struct cinergyt2 *cinergyt2 = dvbdev->priv;
543 547
544 if (cinergyt2->disconnect_pending || down_interruptible(&cinergyt2->sem)) 548 if (cinergyt2->disconnect_pending || mutex_lock_interruptible(&cinergyt2->sem))
545 return -ERESTARTSYS; 549 return -ERESTARTSYS;
546 550
547 poll_wait(file, &cinergyt2->poll_wq, wait); 551 poll_wait(file, &cinergyt2->poll_wq, wait);
548 552
549 up(&cinergyt2->sem); 553 mutex_unlock(&cinergyt2->sem);
550 554
551 return (POLLIN | POLLRDNORM | POLLPRI); 555 return (POLLIN | POLLRDNORM | POLLPRI);
552} 556}
@@ -613,7 +617,7 @@ static int cinergyt2_ioctl (struct inode *inode, struct file *file,
613 if (copy_from_user(&p, (void __user*) arg, sizeof(p))) 617 if (copy_from_user(&p, (void __user*) arg, sizeof(p)))
614 return -EFAULT; 618 return -EFAULT;
615 619
616 if (cinergyt2->disconnect_pending || down_interruptible(&cinergyt2->sem)) 620 if (cinergyt2->disconnect_pending || mutex_lock_interruptible(&cinergyt2->sem))
617 return -ERESTARTSYS; 621 return -ERESTARTSYS;
618 622
619 param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS; 623 param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
@@ -629,7 +633,7 @@ static int cinergyt2_ioctl (struct inode *inode, struct file *file,
629 (char *) param, sizeof(*param), 633 (char *) param, sizeof(*param),
630 NULL, 0); 634 NULL, 0);
631 635
632 up(&cinergyt2->sem); 636 mutex_unlock(&cinergyt2->sem);
633 637
634 return (err < 0) ? err : 0; 638 return (err < 0) ? err : 0;
635 } 639 }
@@ -724,7 +728,7 @@ static void cinergyt2_query_rc (void *data)
724 struct cinergyt2_rc_event rc_events[12]; 728 struct cinergyt2_rc_event rc_events[12];
725 int n, len, i; 729 int n, len, i;
726 730
727 if (cinergyt2->disconnect_pending || down_interruptible(&cinergyt2->sem)) 731 if (cinergyt2->disconnect_pending || mutex_lock_interruptible(&cinergyt2->sem))
728 return; 732 return;
729 733
730 len = cinergyt2_command(cinergyt2, buf, sizeof(buf), 734 len = cinergyt2_command(cinergyt2, buf, sizeof(buf),
@@ -784,7 +788,7 @@ out:
784 schedule_delayed_work(&cinergyt2->rc_query_work, 788 schedule_delayed_work(&cinergyt2->rc_query_work,
785 msecs_to_jiffies(RC_QUERY_INTERVAL)); 789 msecs_to_jiffies(RC_QUERY_INTERVAL));
786 790
787 up(&cinergyt2->sem); 791 mutex_unlock(&cinergyt2->sem);
788} 792}
789 793
790static int cinergyt2_register_rc(struct cinergyt2 *cinergyt2) 794static int cinergyt2_register_rc(struct cinergyt2 *cinergyt2)
@@ -849,7 +853,7 @@ static void cinergyt2_query (void *data)
849 uint8_t lock_bits; 853 uint8_t lock_bits;
850 uint32_t unc; 854 uint32_t unc;
851 855
852 if (cinergyt2->disconnect_pending || down_interruptible(&cinergyt2->sem)) 856 if (cinergyt2->disconnect_pending || mutex_lock_interruptible(&cinergyt2->sem))
853 return; 857 return;
854 858
855 unc = s->uncorrected_block_count; 859 unc = s->uncorrected_block_count;
@@ -868,7 +872,7 @@ static void cinergyt2_query (void *data)
868 schedule_delayed_work(&cinergyt2->query_work, 872 schedule_delayed_work(&cinergyt2->query_work,
869 msecs_to_jiffies(QUERY_INTERVAL)); 873 msecs_to_jiffies(QUERY_INTERVAL));
870 874
871 up(&cinergyt2->sem); 875 mutex_unlock(&cinergyt2->sem);
872} 876}
873 877
874static int cinergyt2_probe (struct usb_interface *intf, 878static int cinergyt2_probe (struct usb_interface *intf,
@@ -885,7 +889,7 @@ static int cinergyt2_probe (struct usb_interface *intf,
885 memset (cinergyt2, 0, sizeof (struct cinergyt2)); 889 memset (cinergyt2, 0, sizeof (struct cinergyt2));
886 usb_set_intfdata (intf, (void *) cinergyt2); 890 usb_set_intfdata (intf, (void *) cinergyt2);
887 891
888 init_MUTEX(&cinergyt2->sem); 892 mutex_init(&cinergyt2->sem);
889 init_waitqueue_head (&cinergyt2->poll_wq); 893 init_waitqueue_head (&cinergyt2->poll_wq);
890 INIT_WORK(&cinergyt2->query_work, cinergyt2_query, cinergyt2); 894 INIT_WORK(&cinergyt2->query_work, cinergyt2_query, cinergyt2);
891 895
@@ -937,6 +941,7 @@ static int cinergyt2_probe (struct usb_interface *intf,
937 return 0; 941 return 0;
938 942
939bailout: 943bailout:
944 dvb_net_release(&cinergyt2->dvbnet);
940 dvb_dmxdev_release(&cinergyt2->dmxdev); 945 dvb_dmxdev_release(&cinergyt2->dmxdev);
941 dvb_dmx_release(&cinergyt2->demux); 946 dvb_dmx_release(&cinergyt2->demux);
942 dvb_unregister_adapter(&cinergyt2->adapter); 947 dvb_unregister_adapter(&cinergyt2->adapter);
@@ -967,7 +972,7 @@ static int cinergyt2_suspend (struct usb_interface *intf, pm_message_t state)
967{ 972{
968 struct cinergyt2 *cinergyt2 = usb_get_intfdata (intf); 973 struct cinergyt2 *cinergyt2 = usb_get_intfdata (intf);
969 974
970 if (cinergyt2->disconnect_pending || down_interruptible(&cinergyt2->sem)) 975 if (cinergyt2->disconnect_pending || mutex_lock_interruptible(&cinergyt2->sem))
971 return -ERESTARTSYS; 976 return -ERESTARTSYS;
972 977
973 if (state.event > PM_EVENT_ON) { 978 if (state.event > PM_EVENT_ON) {
@@ -981,7 +986,7 @@ static int cinergyt2_suspend (struct usb_interface *intf, pm_message_t state)
981 cinergyt2_sleep(cinergyt2, 1); 986 cinergyt2_sleep(cinergyt2, 1);
982 } 987 }
983 988
984 up(&cinergyt2->sem); 989 mutex_unlock(&cinergyt2->sem);
985 return 0; 990 return 0;
986} 991}
987 992
@@ -990,7 +995,7 @@ static int cinergyt2_resume (struct usb_interface *intf)
990 struct cinergyt2 *cinergyt2 = usb_get_intfdata (intf); 995 struct cinergyt2 *cinergyt2 = usb_get_intfdata (intf);
991 struct dvbt_set_parameters_msg *param = &cinergyt2->param; 996 struct dvbt_set_parameters_msg *param = &cinergyt2->param;
992 997
993 if (cinergyt2->disconnect_pending || down_interruptible(&cinergyt2->sem)) 998 if (cinergyt2->disconnect_pending || mutex_lock_interruptible(&cinergyt2->sem))
994 return -ERESTARTSYS; 999 return -ERESTARTSYS;
995 1000
996 if (!cinergyt2->sleeping) { 1001 if (!cinergyt2->sleeping) {
@@ -1003,7 +1008,7 @@ static int cinergyt2_resume (struct usb_interface *intf)
1003 1008
1004 cinergyt2_resume_rc(cinergyt2); 1009 cinergyt2_resume_rc(cinergyt2);
1005 1010
1006 up(&cinergyt2->sem); 1011 mutex_unlock(&cinergyt2->sem);
1007 return 0; 1012 return 0;
1008} 1013}
1009 1014
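Besides the mutex conversion, the cinergyT2 hunks above also add the missing dvb_net/dmxdev/demux teardown calls to both the probe error path and cinergyt2_unregister(), so resources are released in reverse order of registration. A sketch of that unwind pattern, assuming the usual dvb_dmx_init/dvb_dmxdev_init/dvb_net_init entry points (the demo_dev structure and function name are hypothetical):

static int demo_dvb_register(struct demo_dev *d)
{
	int err;

	err = dvb_dmx_init(&d->demux);
	if (err)
		return err;

	err = dvb_dmxdev_init(&d->dmxdev, &d->adapter);
	if (err)
		goto err_dmx;

	err = dvb_net_init(&d->adapter, &d->dvbnet, &d->demux.dmx);
	if (err)
		goto err_dmxdev;

	return 0;

err_dmxdev:
	dvb_dmxdev_release(&d->dmxdev);	/* undo in reverse order */
err_dmx:
	dvb_dmx_release(&d->demux);
	return err;
}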
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index 7b8373ad121b..09e96e9ddbdf 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -1,9 +1,8 @@
1/* 1/*
2 * dmxdev.c - DVB demultiplexer device 2 * dmxdev.c - DVB demultiplexer device
3 * 3 *
4 * Copyright (C) 2000 Ralph Metzler <ralph@convergence.de> 4 * Copyright (C) 2000 Ralph Metzler & Marcus Metzler
5 * & Marcus Metzler <marcus@convergence.de> 5 * for convergence integrated media GmbH
6 for convergence integrated media GmbH
7 * 6 *
8 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public License 8 * modify it under the terms of the GNU Lesser General Public License
@@ -32,7 +31,6 @@
32#include <linux/wait.h> 31#include <linux/wait.h>
33#include <asm/uaccess.h> 32#include <asm/uaccess.h>
34#include <asm/system.h> 33#include <asm/system.h>
35
36#include "dmxdev.h" 34#include "dmxdev.h"
37 35
38static int debug; 36static int debug;
@@ -42,177 +40,133 @@ MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
42 40
43#define dprintk if (debug) printk 41#define dprintk if (debug) printk
44 42
45static inline void dvb_dmxdev_buffer_init(struct dmxdev_buffer *buffer) 43static int dvb_dmxdev_buffer_write(struct dvb_ringbuffer *buf,
44 const u8 *src, size_t len)
46{ 45{
47 buffer->data=NULL; 46 ssize_t free;
48 buffer->size=8192;
49 buffer->pread=0;
50 buffer->pwrite=0;
51 buffer->error=0;
52 init_waitqueue_head(&buffer->queue);
53}
54
55static inline int dvb_dmxdev_buffer_write(struct dmxdev_buffer *buf, const u8 *src, int len)
56{
57 int split;
58 int free;
59 int todo;
60 47
61 if (!len) 48 if (!len)
62 return 0; 49 return 0;
63 if (!buf->data) 50 if (!buf->data)
64 return 0; 51 return 0;
65 52
66 free=buf->pread-buf->pwrite; 53 free = dvb_ringbuffer_free(buf);
67 split=0; 54 if (len > free) {
68 if (free<=0) {
69 free+=buf->size;
70 split=buf->size-buf->pwrite;
71 }
72 if (len>=free) {
73 dprintk("dmxdev: buffer overflow\n"); 55 dprintk("dmxdev: buffer overflow\n");
74 return -1; 56 return -EOVERFLOW;
75 } 57 }
76 if (split>=len) 58
77 split=0; 59 return dvb_ringbuffer_write(buf, src, len);
78 todo=len;
79 if (split) {
80 memcpy(buf->data + buf->pwrite, src, split);
81 todo-=split;
82 buf->pwrite=0;
83 }
84 memcpy(buf->data + buf->pwrite, src+split, todo);
85 buf->pwrite=(buf->pwrite+todo)%buf->size;
86 return len;
87} 60}
88 61
89static ssize_t dvb_dmxdev_buffer_read(struct dmxdev_buffer *src, 62static ssize_t dvb_dmxdev_buffer_read(struct dvb_ringbuffer *src,
90 int non_blocking, char __user *buf, size_t count, loff_t *ppos) 63 int non_blocking, char __user *buf,
64 size_t count, loff_t *ppos)
91{ 65{
92 unsigned long todo=count; 66 size_t todo;
93 int split, avail, error; 67 ssize_t avail;
68 ssize_t ret = 0;
94 69
95 if (!src->data) 70 if (!src->data)
96 return 0; 71 return 0;
97 72
98 if ((error=src->error)) { 73 if (src->error) {
99 src->pwrite=src->pread; 74 ret = src->error;
100 src->error=0; 75 dvb_ringbuffer_flush(src);
101 return error; 76 return ret;
102 } 77 }
103 78
104 if (non_blocking && (src->pwrite==src->pread)) 79 for (todo = count; todo > 0; todo -= ret) {
105 return -EWOULDBLOCK; 80 if (non_blocking && dvb_ringbuffer_empty(src)) {
106 81 ret = -EWOULDBLOCK;
107 while (todo>0) { 82 break;
108 if (non_blocking && (src->pwrite==src->pread)) 83 }
109 return (count-todo) ? (count-todo) : -EWOULDBLOCK;
110 84
111 if (wait_event_interruptible(src->queue, 85 ret = wait_event_interruptible(src->queue,
112 (src->pread!=src->pwrite) || 86 !dvb_ringbuffer_empty(src) ||
113 (src->error))<0) 87 (src->error != 0));
114 return count-todo; 88 if (ret < 0)
89 break;
115 90
116 if ((error=src->error)) { 91 if (src->error) {
117 src->pwrite=src->pread; 92 ret = src->error;
118 src->error=0; 93 dvb_ringbuffer_flush(src);
119 return error; 94 break;
120 } 95 }
121 96
122 split=src->size; 97 avail = dvb_ringbuffer_avail(src);
123 avail=src->pwrite - src->pread; 98 if (avail > todo)
124 if (avail<0) { 99 avail = todo;
125 avail+=src->size; 100
126 split=src->size - src->pread; 101 ret = dvb_ringbuffer_read(src, buf, avail, 1);
127 } 102 if (ret < 0)
128 if (avail>todo) 103 break;
129 avail=todo; 104
130 if (split<avail) { 105 buf += ret;
131 if (copy_to_user(buf, src->data+src->pread, split))
132 return -EFAULT;
133 buf+=split;
134 src->pread=0;
135 todo-=split;
136 avail-=split;
137 }
138 if (avail) {
139 if (copy_to_user(buf, src->data+src->pread, avail))
140 return -EFAULT;
141 src->pread = (src->pread + avail) % src->size;
142 todo-=avail;
143 buf+=avail;
144 }
145 } 106 }
146 return count; 107
108 return (count - todo) ? (count - todo) : ret;
147} 109}
148 110
149static struct dmx_frontend * get_fe(struct dmx_demux *demux, int type) 111static struct dmx_frontend *get_fe(struct dmx_demux *demux, int type)
150{ 112{
151 struct list_head *head, *pos; 113 struct list_head *head, *pos;
152 114
153 head=demux->get_frontends(demux); 115 head = demux->get_frontends(demux);
154 if (!head) 116 if (!head)
155 return NULL; 117 return NULL;
156 list_for_each(pos, head) 118 list_for_each(pos, head)
157 if (DMX_FE_ENTRY(pos)->source==type) 119 if (DMX_FE_ENTRY(pos)->source == type)
158 return DMX_FE_ENTRY(pos); 120 return DMX_FE_ENTRY(pos);
159 121
160 return NULL; 122 return NULL;
161} 123}
162 124
163static inline void dvb_dmxdev_dvr_state_set(struct dmxdev_dvr *dmxdevdvr, int state)
164{
165 spin_lock_irq(&dmxdevdvr->dev->lock);
166 dmxdevdvr->state=state;
167 spin_unlock_irq(&dmxdevdvr->dev->lock);
168}
169
170static int dvb_dvr_open(struct inode *inode, struct file *file) 125static int dvb_dvr_open(struct inode *inode, struct file *file)
171{ 126{
172 struct dvb_device *dvbdev = file->private_data; 127 struct dvb_device *dvbdev = file->private_data;
173 struct dmxdev *dmxdev = dvbdev->priv; 128 struct dmxdev *dmxdev = dvbdev->priv;
174 struct dmx_frontend *front; 129 struct dmx_frontend *front;
175 130
176 dprintk ("function : %s\n", __FUNCTION__); 131 dprintk("function : %s\n", __FUNCTION__);
177 132
178 if (down_interruptible (&dmxdev->mutex)) 133 if (mutex_lock_interruptible(&dmxdev->mutex))
179 return -ERESTARTSYS; 134 return -ERESTARTSYS;
180 135
181 if ((file->f_flags&O_ACCMODE)==O_RDWR) { 136 if ((file->f_flags & O_ACCMODE) == O_RDWR) {
182 if (!(dmxdev->capabilities&DMXDEV_CAP_DUPLEX)) { 137 if (!(dmxdev->capabilities & DMXDEV_CAP_DUPLEX)) {
183 up(&dmxdev->mutex); 138 mutex_unlock(&dmxdev->mutex);
184 return -EOPNOTSUPP; 139 return -EOPNOTSUPP;
185 } 140 }
186 } 141 }
187 142
188 if ((file->f_flags&O_ACCMODE)==O_RDONLY) { 143 if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
189 dvb_dmxdev_buffer_init(&dmxdev->dvr_buffer); 144 void *mem = vmalloc(DVR_BUFFER_SIZE);
190 dmxdev->dvr_buffer.size=DVR_BUFFER_SIZE; 145 if (!mem) {
191 dmxdev->dvr_buffer.data=vmalloc(DVR_BUFFER_SIZE); 146 mutex_unlock(&dmxdev->mutex);
192 if (!dmxdev->dvr_buffer.data) { 147 return -ENOMEM;
193 up(&dmxdev->mutex); 148 }
194 return -ENOMEM; 149 dvb_ringbuffer_init(&dmxdev->dvr_buffer, mem, DVR_BUFFER_SIZE);
195 }
196 } 150 }
197 151
198 if ((file->f_flags&O_ACCMODE)==O_WRONLY) { 152 if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
199 dmxdev->dvr_orig_fe=dmxdev->demux->frontend; 153 dmxdev->dvr_orig_fe = dmxdev->demux->frontend;
200 154
201 if (!dmxdev->demux->write) { 155 if (!dmxdev->demux->write) {
202 up(&dmxdev->mutex); 156 mutex_unlock(&dmxdev->mutex);
203 return -EOPNOTSUPP; 157 return -EOPNOTSUPP;
204 } 158 }
205 159
206 front=get_fe(dmxdev->demux, DMX_MEMORY_FE); 160 front = get_fe(dmxdev->demux, DMX_MEMORY_FE);
207 161
208 if (!front) { 162 if (!front) {
209 up(&dmxdev->mutex); 163 mutex_unlock(&dmxdev->mutex);
210 return -EINVAL; 164 return -EINVAL;
211 } 165 }
212 dmxdev->demux->disconnect_frontend(dmxdev->demux); 166 dmxdev->demux->disconnect_frontend(dmxdev->demux);
213 dmxdev->demux->connect_frontend(dmxdev->demux, front); 167 dmxdev->demux->connect_frontend(dmxdev->demux, front);
214 } 168 }
215 up(&dmxdev->mutex); 169 mutex_unlock(&dmxdev->mutex);
216 return 0; 170 return 0;
217} 171}
218 172
@@ -221,30 +175,30 @@ static int dvb_dvr_release(struct inode *inode, struct file *file)
221 struct dvb_device *dvbdev = file->private_data; 175 struct dvb_device *dvbdev = file->private_data;
222 struct dmxdev *dmxdev = dvbdev->priv; 176 struct dmxdev *dmxdev = dvbdev->priv;
223 177
224 if (down_interruptible (&dmxdev->mutex)) 178 if (mutex_lock_interruptible(&dmxdev->mutex))
225 return -ERESTARTSYS; 179 return -ERESTARTSYS;
226 180
227 if ((file->f_flags&O_ACCMODE)==O_WRONLY) { 181 if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
228 dmxdev->demux->disconnect_frontend(dmxdev->demux); 182 dmxdev->demux->disconnect_frontend(dmxdev->demux);
229 dmxdev->demux->connect_frontend(dmxdev->demux, 183 dmxdev->demux->connect_frontend(dmxdev->demux,
230 dmxdev->dvr_orig_fe); 184 dmxdev->dvr_orig_fe);
231 } 185 }
232 if ((file->f_flags&O_ACCMODE)==O_RDONLY) { 186 if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
233 if (dmxdev->dvr_buffer.data) { 187 if (dmxdev->dvr_buffer.data) {
234 void *mem=dmxdev->dvr_buffer.data; 188 void *mem = dmxdev->dvr_buffer.data;
235 mb(); 189 mb();
236 spin_lock_irq(&dmxdev->lock); 190 spin_lock_irq(&dmxdev->lock);
237 dmxdev->dvr_buffer.data=NULL; 191 dmxdev->dvr_buffer.data = NULL;
238 spin_unlock_irq(&dmxdev->lock); 192 spin_unlock_irq(&dmxdev->lock);
239 vfree(mem); 193 vfree(mem);
240 } 194 }
241 } 195 }
242 up(&dmxdev->mutex); 196 mutex_unlock(&dmxdev->mutex);
243 return 0; 197 return 0;
244} 198}
245 199
246static ssize_t dvb_dvr_write(struct file *file, const char __user *buf, 200static ssize_t dvb_dvr_write(struct file *file, const char __user *buf,
247 size_t count, loff_t *ppos) 201 size_t count, loff_t *ppos)
248{ 202{
249 struct dvb_device *dvbdev = file->private_data; 203 struct dvb_device *dvbdev = file->private_data;
250 struct dmxdev *dmxdev = dvbdev->priv; 204 struct dmxdev *dmxdev = dvbdev->priv;
@@ -252,60 +206,62 @@ static ssize_t dvb_dvr_write(struct file *file, const char __user *buf,
252 206
253 if (!dmxdev->demux->write) 207 if (!dmxdev->demux->write)
254 return -EOPNOTSUPP; 208 return -EOPNOTSUPP;
255 if ((file->f_flags&O_ACCMODE)!=O_WRONLY) 209 if ((file->f_flags & O_ACCMODE) != O_WRONLY)
256 return -EINVAL; 210 return -EINVAL;
257 if (down_interruptible (&dmxdev->mutex)) 211 if (mutex_lock_interruptible(&dmxdev->mutex))
258 return -ERESTARTSYS; 212 return -ERESTARTSYS;
259 ret=dmxdev->demux->write(dmxdev->demux, buf, count); 213 ret = dmxdev->demux->write(dmxdev->demux, buf, count);
260 up(&dmxdev->mutex); 214 mutex_unlock(&dmxdev->mutex);
261 return ret; 215 return ret;
262} 216}
263 217
264static ssize_t dvb_dvr_read(struct file *file, char __user *buf, size_t count, 218static ssize_t dvb_dvr_read(struct file *file, char __user *buf, size_t count,
265 loff_t *ppos) 219 loff_t *ppos)
266{ 220{
267 struct dvb_device *dvbdev = file->private_data; 221 struct dvb_device *dvbdev = file->private_data;
268 struct dmxdev *dmxdev = dvbdev->priv; 222 struct dmxdev *dmxdev = dvbdev->priv;
269 int ret; 223 int ret;
270 224
271 //down(&dmxdev->mutex); 225 //mutex_lock(&dmxdev->mutex);
272 ret= dvb_dmxdev_buffer_read(&dmxdev->dvr_buffer, 226 ret = dvb_dmxdev_buffer_read(&dmxdev->dvr_buffer,
273 file->f_flags&O_NONBLOCK, 227 file->f_flags & O_NONBLOCK,
274 buf, count, ppos); 228 buf, count, ppos);
275 //up(&dmxdev->mutex); 229 //mutex_unlock(&dmxdev->mutex);
276 return ret; 230 return ret;
277} 231}
278 232
279static inline void dvb_dmxdev_filter_state_set(struct dmxdev_filter *dmxdevfilter, int state) 233static inline void dvb_dmxdev_filter_state_set(struct dmxdev_filter
234 *dmxdevfilter, int state)
280{ 235{
281 spin_lock_irq(&dmxdevfilter->dev->lock); 236 spin_lock_irq(&dmxdevfilter->dev->lock);
282 dmxdevfilter->state=state; 237 dmxdevfilter->state = state;
283 spin_unlock_irq(&dmxdevfilter->dev->lock); 238 spin_unlock_irq(&dmxdevfilter->dev->lock);
284} 239}
285 240
286static int dvb_dmxdev_set_buffer_size(struct dmxdev_filter *dmxdevfilter, unsigned long size) 241static int dvb_dmxdev_set_buffer_size(struct dmxdev_filter *dmxdevfilter,
242 unsigned long size)
287{ 243{
288 struct dmxdev_buffer *buf=&dmxdevfilter->buffer; 244 struct dvb_ringbuffer *buf = &dmxdevfilter->buffer;
289 void *mem; 245 void *mem;
290 246
291 if (buf->size==size) 247 if (buf->size == size)
292 return 0; 248 return 0;
293 if (dmxdevfilter->state>=DMXDEV_STATE_GO) 249 if (dmxdevfilter->state >= DMXDEV_STATE_GO)
294 return -EBUSY; 250 return -EBUSY;
295 spin_lock_irq(&dmxdevfilter->dev->lock); 251 spin_lock_irq(&dmxdevfilter->dev->lock);
296 mem=buf->data; 252 mem = buf->data;
297 buf->data=NULL; 253 buf->data = NULL;
298 buf->size=size; 254 buf->size = size;
299 buf->pwrite=buf->pread=0; 255 dvb_ringbuffer_flush(buf);
300 spin_unlock_irq(&dmxdevfilter->dev->lock); 256 spin_unlock_irq(&dmxdevfilter->dev->lock);
301 vfree(mem); 257 vfree(mem);
302 258
303 if (buf->size) { 259 if (buf->size) {
304 mem=vmalloc(dmxdevfilter->buffer.size); 260 mem = vmalloc(dmxdevfilter->buffer.size);
305 if (!mem) 261 if (!mem)
306 return -ENOMEM; 262 return -ENOMEM;
307 spin_lock_irq(&dmxdevfilter->dev->lock); 263 spin_lock_irq(&dmxdevfilter->dev->lock);
308 buf->data=mem; 264 buf->data = mem;
309 spin_unlock_irq(&dmxdevfilter->dev->lock); 265 spin_unlock_irq(&dmxdevfilter->dev->lock);
310 } 266 }
311 return 0; 267 return 0;
@@ -313,31 +269,33 @@ static int dvb_dmxdev_set_buffer_size(struct dmxdev_filter *dmxdevfilter, unsign
313 269
314static void dvb_dmxdev_filter_timeout(unsigned long data) 270static void dvb_dmxdev_filter_timeout(unsigned long data)
315{ 271{
316 struct dmxdev_filter *dmxdevfilter=(struct dmxdev_filter *)data; 272 struct dmxdev_filter *dmxdevfilter = (struct dmxdev_filter *)data;
317 273
318 dmxdevfilter->buffer.error=-ETIMEDOUT; 274 dmxdevfilter->buffer.error = -ETIMEDOUT;
319 spin_lock_irq(&dmxdevfilter->dev->lock); 275 spin_lock_irq(&dmxdevfilter->dev->lock);
320 dmxdevfilter->state=DMXDEV_STATE_TIMEDOUT; 276 dmxdevfilter->state = DMXDEV_STATE_TIMEDOUT;
321 spin_unlock_irq(&dmxdevfilter->dev->lock); 277 spin_unlock_irq(&dmxdevfilter->dev->lock);
322 wake_up(&dmxdevfilter->buffer.queue); 278 wake_up(&dmxdevfilter->buffer.queue);
323} 279}
324 280
325static void dvb_dmxdev_filter_timer(struct dmxdev_filter *dmxdevfilter) 281static void dvb_dmxdev_filter_timer(struct dmxdev_filter *dmxdevfilter)
326{ 282{
327 struct dmx_sct_filter_params *para=&dmxdevfilter->params.sec; 283 struct dmx_sct_filter_params *para = &dmxdevfilter->params.sec;
328 284
329 del_timer(&dmxdevfilter->timer); 285 del_timer(&dmxdevfilter->timer);
330 if (para->timeout) { 286 if (para->timeout) {
331 dmxdevfilter->timer.function=dvb_dmxdev_filter_timeout; 287 dmxdevfilter->timer.function = dvb_dmxdev_filter_timeout;
332 dmxdevfilter->timer.data=(unsigned long) dmxdevfilter; 288 dmxdevfilter->timer.data = (unsigned long)dmxdevfilter;
333 dmxdevfilter->timer.expires=jiffies+1+(HZ/2+HZ*para->timeout)/1000; 289 dmxdevfilter->timer.expires =
290 jiffies + 1 + (HZ / 2 + HZ * para->timeout) / 1000;
334 add_timer(&dmxdevfilter->timer); 291 add_timer(&dmxdevfilter->timer);
335 } 292 }
336} 293}
337 294
338static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len, 295static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
339 const u8 *buffer2, size_t buffer2_len, 296 const u8 *buffer2, size_t buffer2_len,
340 struct dmx_section_filter *filter, enum dmx_success success) 297 struct dmx_section_filter *filter,
298 enum dmx_success success)
341{ 299{
342 struct dmxdev_filter *dmxdevfilter = filter->priv; 300 struct dmxdev_filter *dmxdevfilter = filter->priv;
343 int ret; 301 int ret;
@@ -347,68 +305,68 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
347 return 0; 305 return 0;
348 } 306 }
349 spin_lock(&dmxdevfilter->dev->lock); 307 spin_lock(&dmxdevfilter->dev->lock);
350 if (dmxdevfilter->state!=DMXDEV_STATE_GO) { 308 if (dmxdevfilter->state != DMXDEV_STATE_GO) {
351 spin_unlock(&dmxdevfilter->dev->lock); 309 spin_unlock(&dmxdevfilter->dev->lock);
352 return 0; 310 return 0;
353 } 311 }
354 del_timer(&dmxdevfilter->timer); 312 del_timer(&dmxdevfilter->timer);
355 dprintk("dmxdev: section callback %02x %02x %02x %02x %02x %02x\n", 313 dprintk("dmxdev: section callback %02x %02x %02x %02x %02x %02x\n",
356 buffer1[0], buffer1[1], 314 buffer1[0], buffer1[1],
357 buffer1[2], buffer1[3], 315 buffer1[2], buffer1[3], buffer1[4], buffer1[5]);
358 buffer1[4], buffer1[5]); 316 ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer1,
359 ret=dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer1, buffer1_len); 317 buffer1_len);
360 if (ret==buffer1_len) { 318 if (ret == buffer1_len) {
361 ret=dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer2, buffer2_len); 319 ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer2,
320 buffer2_len);
362 } 321 }
363 if (ret<0) { 322 if (ret < 0) {
364 dmxdevfilter->buffer.pwrite=dmxdevfilter->buffer.pread; 323 dvb_ringbuffer_flush(&dmxdevfilter->buffer);
365 dmxdevfilter->buffer.error=-EOVERFLOW; 324 dmxdevfilter->buffer.error = ret;
366 } 325 }
367 if (dmxdevfilter->params.sec.flags&DMX_ONESHOT) 326 if (dmxdevfilter->params.sec.flags & DMX_ONESHOT)
368 dmxdevfilter->state=DMXDEV_STATE_DONE; 327 dmxdevfilter->state = DMXDEV_STATE_DONE;
369 spin_unlock(&dmxdevfilter->dev->lock); 328 spin_unlock(&dmxdevfilter->dev->lock);
370 wake_up(&dmxdevfilter->buffer.queue); 329 wake_up(&dmxdevfilter->buffer.queue);
371 return 0; 330 return 0;
372} 331}
373 332
374static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len, 333static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
375 const u8 *buffer2, size_t buffer2_len, 334 const u8 *buffer2, size_t buffer2_len,
376 struct dmx_ts_feed *feed, enum dmx_success success) 335 struct dmx_ts_feed *feed,
336 enum dmx_success success)
377{ 337{
378 struct dmxdev_filter *dmxdevfilter = feed->priv; 338 struct dmxdev_filter *dmxdevfilter = feed->priv;
379 struct dmxdev_buffer *buffer; 339 struct dvb_ringbuffer *buffer;
380 int ret; 340 int ret;
381 341
382 spin_lock(&dmxdevfilter->dev->lock); 342 spin_lock(&dmxdevfilter->dev->lock);
383 if (dmxdevfilter->params.pes.output==DMX_OUT_DECODER) { 343 if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER) {
384 spin_unlock(&dmxdevfilter->dev->lock); 344 spin_unlock(&dmxdevfilter->dev->lock);
385 return 0; 345 return 0;
386 } 346 }
387 347
388 if (dmxdevfilter->params.pes.output==DMX_OUT_TAP) 348 if (dmxdevfilter->params.pes.output == DMX_OUT_TAP)
389 buffer=&dmxdevfilter->buffer; 349 buffer = &dmxdevfilter->buffer;
390 else 350 else
391 buffer=&dmxdevfilter->dev->dvr_buffer; 351 buffer = &dmxdevfilter->dev->dvr_buffer;
392 if (buffer->error) { 352 if (buffer->error) {
393 spin_unlock(&dmxdevfilter->dev->lock); 353 spin_unlock(&dmxdevfilter->dev->lock);
394 wake_up(&buffer->queue); 354 wake_up(&buffer->queue);
395 return 0; 355 return 0;
396 } 356 }
397 ret=dvb_dmxdev_buffer_write(buffer, buffer1, buffer1_len); 357 ret = dvb_dmxdev_buffer_write(buffer, buffer1, buffer1_len);
398 if (ret==buffer1_len) 358 if (ret == buffer1_len)
399 ret=dvb_dmxdev_buffer_write(buffer, buffer2, buffer2_len); 359 ret = dvb_dmxdev_buffer_write(buffer, buffer2, buffer2_len);
400 if (ret<0) { 360 if (ret < 0) {
401 buffer->pwrite=buffer->pread; 361 dvb_ringbuffer_flush(buffer);
402 buffer->error=-EOVERFLOW; 362 buffer->error = ret;
403 } 363 }
404 spin_unlock(&dmxdevfilter->dev->lock); 364 spin_unlock(&dmxdevfilter->dev->lock);
405 wake_up(&buffer->queue); 365 wake_up(&buffer->queue);
406 return 0; 366 return 0;
407} 367}
408 368
409
410/* stop feed but only mark the specified filter as stopped (state set) */ 369/* stop feed but only mark the specified filter as stopped (state set) */
411
412static int dvb_dmxdev_feed_stop(struct dmxdev_filter *dmxdevfilter) 370static int dvb_dmxdev_feed_stop(struct dmxdev_filter *dmxdevfilter)
413{ 371{
414 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET); 372 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);
@@ -427,20 +385,16 @@ static int dvb_dmxdev_feed_stop(struct dmxdev_filter *dmxdevfilter)
427 return 0; 385 return 0;
428} 386}
429 387
430
431/* start feed associated with the specified filter */ 388/* start feed associated with the specified filter */
432
433static int dvb_dmxdev_feed_start(struct dmxdev_filter *filter) 389static int dvb_dmxdev_feed_start(struct dmxdev_filter *filter)
434{ 390{
435 dvb_dmxdev_filter_state_set (filter, DMXDEV_STATE_GO); 391 dvb_dmxdev_filter_state_set(filter, DMXDEV_STATE_GO);
436 392
437 switch (filter->type) { 393 switch (filter->type) {
438 case DMXDEV_TYPE_SEC: 394 case DMXDEV_TYPE_SEC:
439 return filter->feed.sec->start_filtering(filter->feed.sec); 395 return filter->feed.sec->start_filtering(filter->feed.sec);
440 break;
441 case DMXDEV_TYPE_PES: 396 case DMXDEV_TYPE_PES:
442 return filter->feed.ts->start_filtering(filter->feed.ts); 397 return filter->feed.ts->start_filtering(filter->feed.ts);
443 break;
444 default: 398 default:
445 return -EINVAL; 399 return -EINVAL;
446 } 400 }
@@ -448,32 +402,31 @@ static int dvb_dmxdev_feed_start(struct dmxdev_filter *filter)
448 return 0; 402 return 0;
449} 403}
450 404
451
452/* restart section feed if it has filters left associated with it, 405/* restart section feed if it has filters left associated with it,
453 otherwise release the feed */ 406 otherwise release the feed */
454
455static int dvb_dmxdev_feed_restart(struct dmxdev_filter *filter) 407static int dvb_dmxdev_feed_restart(struct dmxdev_filter *filter)
456{ 408{
457 int i; 409 int i;
458 struct dmxdev *dmxdev = filter->dev; 410 struct dmxdev *dmxdev = filter->dev;
459 u16 pid = filter->params.sec.pid; 411 u16 pid = filter->params.sec.pid;
460 412
461 for (i=0; i<dmxdev->filternum; i++) 413 for (i = 0; i < dmxdev->filternum; i++)
462 if (dmxdev->filter[i].state>=DMXDEV_STATE_GO && 414 if (dmxdev->filter[i].state >= DMXDEV_STATE_GO &&
463 dmxdev->filter[i].type==DMXDEV_TYPE_SEC && 415 dmxdev->filter[i].type == DMXDEV_TYPE_SEC &&
464 dmxdev->filter[i].pid==pid) { 416 dmxdev->filter[i].params.sec.pid == pid) {
465 dvb_dmxdev_feed_start(&dmxdev->filter[i]); 417 dvb_dmxdev_feed_start(&dmxdev->filter[i]);
466 return 0; 418 return 0;
467 } 419 }
468 420
469 filter->dev->demux->release_section_feed(dmxdev->demux, filter->feed.sec); 421 filter->dev->demux->release_section_feed(dmxdev->demux,
422 filter->feed.sec);
470 423
471 return 0; 424 return 0;
472} 425}
473 426
474static int dvb_dmxdev_filter_stop(struct dmxdev_filter *dmxdevfilter) 427static int dvb_dmxdev_filter_stop(struct dmxdev_filter *dmxdevfilter)
475{ 428{
476 if (dmxdevfilter->state<DMXDEV_STATE_GO) 429 if (dmxdevfilter->state < DMXDEV_STATE_GO)
477 return 0; 430 return 0;
478 431
479 switch (dmxdevfilter->type) { 432 switch (dmxdevfilter->type) {
@@ -483,36 +436,36 @@ static int dvb_dmxdev_filter_stop(struct dmxdev_filter *dmxdevfilter)
483 dvb_dmxdev_feed_stop(dmxdevfilter); 436 dvb_dmxdev_feed_stop(dmxdevfilter);
484 if (dmxdevfilter->filter.sec) 437 if (dmxdevfilter->filter.sec)
485 dmxdevfilter->feed.sec-> 438 dmxdevfilter->feed.sec->
486 release_filter(dmxdevfilter->feed.sec, 439 release_filter(dmxdevfilter->feed.sec,
487 dmxdevfilter->filter.sec); 440 dmxdevfilter->filter.sec);
488 dvb_dmxdev_feed_restart(dmxdevfilter); 441 dvb_dmxdev_feed_restart(dmxdevfilter);
489 dmxdevfilter->feed.sec=NULL; 442 dmxdevfilter->feed.sec = NULL;
490 break; 443 break;
491 case DMXDEV_TYPE_PES: 444 case DMXDEV_TYPE_PES:
492 if (!dmxdevfilter->feed.ts) 445 if (!dmxdevfilter->feed.ts)
493 break; 446 break;
494 dvb_dmxdev_feed_stop(dmxdevfilter); 447 dvb_dmxdev_feed_stop(dmxdevfilter);
495 dmxdevfilter->dev->demux-> 448 dmxdevfilter->dev->demux->
496 release_ts_feed(dmxdevfilter->dev->demux, 449 release_ts_feed(dmxdevfilter->dev->demux,
497 dmxdevfilter->feed.ts); 450 dmxdevfilter->feed.ts);
498 dmxdevfilter->feed.ts=NULL; 451 dmxdevfilter->feed.ts = NULL;
499 break; 452 break;
500 default: 453 default:
501 if (dmxdevfilter->state==DMXDEV_STATE_ALLOCATED) 454 if (dmxdevfilter->state == DMXDEV_STATE_ALLOCATED)
502 return 0; 455 return 0;
503 return -EINVAL; 456 return -EINVAL;
504 } 457 }
505 dmxdevfilter->buffer.pwrite=dmxdevfilter->buffer.pread=0; 458
459 dvb_ringbuffer_flush(&dmxdevfilter->buffer);
506 return 0; 460 return 0;
507} 461}
508 462
509static inline int dvb_dmxdev_filter_reset(struct dmxdev_filter *dmxdevfilter) 463static inline int dvb_dmxdev_filter_reset(struct dmxdev_filter *dmxdevfilter)
510{ 464{
511 if (dmxdevfilter->state<DMXDEV_STATE_SET) 465 if (dmxdevfilter->state < DMXDEV_STATE_SET)
512 return 0; 466 return 0;
513 467
514 dmxdevfilter->type=DMXDEV_TYPE_NONE; 468 dmxdevfilter->type = DMXDEV_TYPE_NONE;
515 dmxdevfilter->pid=0xffff;
516 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED); 469 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
517 return 0; 470 return 0;
518} 471}
@@ -529,32 +482,33 @@ static int dvb_dmxdev_filter_start(struct dmxdev_filter *filter)
529 if (filter->state >= DMXDEV_STATE_GO) 482 if (filter->state >= DMXDEV_STATE_GO)
530 dvb_dmxdev_filter_stop(filter); 483 dvb_dmxdev_filter_stop(filter);
531 484
532 if (!(mem = filter->buffer.data)) { 485 if (!filter->buffer.data) {
533 mem = vmalloc(filter->buffer.size); 486 mem = vmalloc(filter->buffer.size);
487 if (!mem)
488 return -ENOMEM;
534 spin_lock_irq(&filter->dev->lock); 489 spin_lock_irq(&filter->dev->lock);
535 filter->buffer.data=mem; 490 filter->buffer.data = mem;
536 spin_unlock_irq(&filter->dev->lock); 491 spin_unlock_irq(&filter->dev->lock);
537 if (!filter->buffer.data)
538 return -ENOMEM;
539 } 492 }
540 493
541 filter->buffer.pwrite = filter->buffer.pread = 0; 494 dvb_ringbuffer_flush(&filter->buffer);
542 495
543 switch (filter->type) { 496 switch (filter->type) {
544 case DMXDEV_TYPE_SEC: 497 case DMXDEV_TYPE_SEC:
545 { 498 {
546 struct dmx_sct_filter_params *para=&filter->params.sec; 499 struct dmx_sct_filter_params *para = &filter->params.sec;
547 struct dmx_section_filter **secfilter=&filter->filter.sec; 500 struct dmx_section_filter **secfilter = &filter->filter.sec;
548 struct dmx_section_feed **secfeed=&filter->feed.sec; 501 struct dmx_section_feed **secfeed = &filter->feed.sec;
502
503 *secfilter = NULL;
504 *secfeed = NULL;
549 505
550 *secfilter=NULL;
551 *secfeed=NULL;
552 506
553 /* find active filter/feed with same PID */ 507 /* find active filter/feed with same PID */
554 for (i=0; i<dmxdev->filternum; i++) { 508 for (i = 0; i < dmxdev->filternum; i++) {
555 if (dmxdev->filter[i].state >= DMXDEV_STATE_GO && 509 if (dmxdev->filter[i].state >= DMXDEV_STATE_GO &&
556 dmxdev->filter[i].pid == para->pid && 510 dmxdev->filter[i].type == DMXDEV_TYPE_SEC &&
557 dmxdev->filter[i].type == DMXDEV_TYPE_SEC) { 511 dmxdev->filter[i].params.sec.pid == para->pid) {
558 *secfeed = dmxdev->filter[i].feed.sec; 512 *secfeed = dmxdev->filter[i].feed.sec;
559 break; 513 break;
560 } 514 }
@@ -562,21 +516,20 @@ static int dvb_dmxdev_filter_start(struct dmxdev_filter *filter)
562 516
563 /* if no feed found, try to allocate new one */ 517 /* if no feed found, try to allocate new one */
564 if (!*secfeed) { 518 if (!*secfeed) {
565 ret=dmxdev->demux->allocate_section_feed(dmxdev->demux, 519 ret = dmxdev->demux->allocate_section_feed(dmxdev->demux,
566 secfeed, 520 secfeed,
567 dvb_dmxdev_section_callback); 521 dvb_dmxdev_section_callback);
568 if (ret<0) { 522 if (ret < 0) {
569 printk ("DVB (%s): could not alloc feed\n", 523 printk("DVB (%s): could not alloc feed\n",
570 __FUNCTION__); 524 __FUNCTION__);
571 return ret; 525 return ret;
572 } 526 }
573 527
574 ret=(*secfeed)->set(*secfeed, para->pid, 32768, 528 ret = (*secfeed)->set(*secfeed, para->pid, 32768,
575 (para->flags & DMX_CHECK_CRC) ? 1 : 0); 529 (para->flags & DMX_CHECK_CRC) ? 1 : 0);
576 530 if (ret < 0) {
577 if (ret<0) { 531 printk("DVB (%s): could not set feed\n",
578 printk ("DVB (%s): could not set feed\n", 532 __FUNCTION__);
579 __FUNCTION__);
580 dvb_dmxdev_feed_restart(filter); 533 dvb_dmxdev_feed_restart(filter);
581 return ret; 534 return ret;
582 } 535 }
@@ -584,41 +537,38 @@ static int dvb_dmxdev_filter_start(struct dmxdev_filter *filter)
584 dvb_dmxdev_feed_stop(filter); 537 dvb_dmxdev_feed_stop(filter);
585 } 538 }
586 539
587 ret=(*secfeed)->allocate_filter(*secfeed, secfilter); 540 ret = (*secfeed)->allocate_filter(*secfeed, secfilter);
588
589 if (ret < 0) { 541 if (ret < 0) {
590 dvb_dmxdev_feed_restart(filter); 542 dvb_dmxdev_feed_restart(filter);
591 filter->feed.sec->start_filtering(*secfeed); 543 filter->feed.sec->start_filtering(*secfeed);
592 dprintk ("could not get filter\n"); 544 dprintk("could not get filter\n");
593 return ret; 545 return ret;
594 } 546 }
595 547
596 (*secfilter)->priv = filter; 548 (*secfilter)->priv = filter;
597 549
598 memcpy(&((*secfilter)->filter_value[3]), 550 memcpy(&((*secfilter)->filter_value[3]),
599 &(para->filter.filter[1]), DMX_FILTER_SIZE-1); 551 &(para->filter.filter[1]), DMX_FILTER_SIZE - 1);
600 memcpy(&(*secfilter)->filter_mask[3], 552 memcpy(&(*secfilter)->filter_mask[3],
601 &para->filter.mask[1], DMX_FILTER_SIZE-1); 553 &para->filter.mask[1], DMX_FILTER_SIZE - 1);
602 memcpy(&(*secfilter)->filter_mode[3], 554 memcpy(&(*secfilter)->filter_mode[3],
603 &para->filter.mode[1], DMX_FILTER_SIZE-1); 555 &para->filter.mode[1], DMX_FILTER_SIZE - 1);
604 556
605 (*secfilter)->filter_value[0]=para->filter.filter[0]; 557 (*secfilter)->filter_value[0] = para->filter.filter[0];
606 (*secfilter)->filter_mask[0]=para->filter.mask[0]; 558 (*secfilter)->filter_mask[0] = para->filter.mask[0];
607 (*secfilter)->filter_mode[0]=para->filter.mode[0]; 559 (*secfilter)->filter_mode[0] = para->filter.mode[0];
608 (*secfilter)->filter_mask[1]=0; 560 (*secfilter)->filter_mask[1] = 0;
609 (*secfilter)->filter_mask[2]=0; 561 (*secfilter)->filter_mask[2] = 0;
610 562
611 filter->todo = 0; 563 filter->todo = 0;
612 564
613 ret = filter->feed.sec->start_filtering (filter->feed.sec); 565 ret = filter->feed.sec->start_filtering(filter->feed.sec);
614
615 if (ret < 0) 566 if (ret < 0)
616 return ret; 567 return ret;
617 568
618 dvb_dmxdev_filter_timer(filter); 569 dvb_dmxdev_filter_timer(filter);
619 break; 570 break;
620 } 571 }
621
622 case DMXDEV_TYPE_PES: 572 case DMXDEV_TYPE_PES:
623 { 573 {
624 struct timespec timeout = { 0 }; 574 struct timespec timeout = { 0 };
@@ -630,41 +580,41 @@ static int dvb_dmxdev_filter_start(struct dmxdev_filter *filter)
630 struct dmx_ts_feed **tsfeed = &filter->feed.ts; 580 struct dmx_ts_feed **tsfeed = &filter->feed.ts;
631 581
632 filter->feed.ts = NULL; 582 filter->feed.ts = NULL;
633 otype=para->output; 583 otype = para->output;
634 584
635 ts_pes=(enum dmx_ts_pes) para->pes_type; 585 ts_pes = (enum dmx_ts_pes)para->pes_type;
636 586
637 if (ts_pes<DMX_PES_OTHER) 587 if (ts_pes < DMX_PES_OTHER)
638 ts_type=TS_DECODER; 588 ts_type = TS_DECODER;
639 else 589 else
640 ts_type=0; 590 ts_type = 0;
641 591
642 if (otype == DMX_OUT_TS_TAP) 592 if (otype == DMX_OUT_TS_TAP)
643 ts_type |= TS_PACKET; 593 ts_type |= TS_PACKET;
644 594
645 if (otype == DMX_OUT_TAP) 595 if (otype == DMX_OUT_TAP)
646 ts_type |= TS_PAYLOAD_ONLY|TS_PACKET; 596 ts_type |= TS_PAYLOAD_ONLY | TS_PACKET;
647 597
648 ret=dmxdev->demux->allocate_ts_feed(dmxdev->demux, 598 ret = dmxdev->demux->allocate_ts_feed(dmxdev->demux,
649 tsfeed, 599 tsfeed,
650 dvb_dmxdev_ts_callback); 600 dvb_dmxdev_ts_callback);
651 if (ret<0) 601 if (ret < 0)
652 return ret; 602 return ret;
653 603
654 (*tsfeed)->priv = (void *) filter; 604 (*tsfeed)->priv = filter;
655 605
656 ret = (*tsfeed)->set(*tsfeed, para->pid, ts_type, ts_pes, 606 ret = (*tsfeed)->set(*tsfeed, para->pid, ts_type, ts_pes,
657 32768, timeout); 607 32768, timeout);
658
659 if (ret < 0) { 608 if (ret < 0) {
660 dmxdev->demux->release_ts_feed(dmxdev->demux, *tsfeed); 609 dmxdev->demux->release_ts_feed(dmxdev->demux,
610 *tsfeed);
661 return ret; 611 return ret;
662 } 612 }
663 613
664 ret = filter->feed.ts->start_filtering(filter->feed.ts); 614 ret = filter->feed.ts->start_filtering(filter->feed.ts);
665
666 if (ret < 0) { 615 if (ret < 0) {
667 dmxdev->demux->release_ts_feed(dmxdev->demux, *tsfeed); 616 dmxdev->demux->release_ts_feed(dmxdev->demux,
617 *tsfeed);
668 return ret; 618 return ret;
669 } 619 }
670 620
@@ -688,41 +638,40 @@ static int dvb_demux_open(struct inode *inode, struct file *file)
688 if (!dmxdev->filter) 638 if (!dmxdev->filter)
689 return -EINVAL; 639 return -EINVAL;
690 640
691 if (down_interruptible(&dmxdev->mutex)) 641 if (mutex_lock_interruptible(&dmxdev->mutex))
692 return -ERESTARTSYS; 642 return -ERESTARTSYS;
693 643
694 for (i=0; i<dmxdev->filternum; i++) 644 for (i = 0; i < dmxdev->filternum; i++)
695 if (dmxdev->filter[i].state==DMXDEV_STATE_FREE) 645 if (dmxdev->filter[i].state == DMXDEV_STATE_FREE)
696 break; 646 break;
697 647
698 if (i==dmxdev->filternum) { 648 if (i == dmxdev->filternum) {
699 up(&dmxdev->mutex); 649 mutex_unlock(&dmxdev->mutex);
700 return -EMFILE; 650 return -EMFILE;
701 } 651 }
702 652
703 dmxdevfilter=&dmxdev->filter[i]; 653 dmxdevfilter = &dmxdev->filter[i];
704 sema_init(&dmxdevfilter->mutex, 1); 654 mutex_init(&dmxdevfilter->mutex);
705 dmxdevfilter->dvbdev=dmxdev->dvbdev; 655 file->private_data = dmxdevfilter;
706 file->private_data=dmxdevfilter;
707 656
708 dvb_dmxdev_buffer_init(&dmxdevfilter->buffer); 657 dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);
709 dmxdevfilter->type=DMXDEV_TYPE_NONE; 658 dmxdevfilter->type = DMXDEV_TYPE_NONE;
710 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED); 659 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
711 dmxdevfilter->feed.ts=NULL; 660 dmxdevfilter->feed.ts = NULL;
712 init_timer(&dmxdevfilter->timer); 661 init_timer(&dmxdevfilter->timer);
713 662
714 up(&dmxdev->mutex); 663 mutex_unlock(&dmxdev->mutex);
715 return 0; 664 return 0;
716} 665}
717 666
718 667static int dvb_dmxdev_filter_free(struct dmxdev *dmxdev,
719static int dvb_dmxdev_filter_free(struct dmxdev *dmxdev, struct dmxdev_filter *dmxdevfilter) 668 struct dmxdev_filter *dmxdevfilter)
720{ 669{
721 if (down_interruptible(&dmxdev->mutex)) 670 if (mutex_lock_interruptible(&dmxdev->mutex))
722 return -ERESTARTSYS; 671 return -ERESTARTSYS;
723 672
724 if (down_interruptible(&dmxdevfilter->mutex)) { 673 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
725 up(&dmxdev->mutex); 674 mutex_unlock(&dmxdev->mutex);
726 return -ERESTARTSYS; 675 return -ERESTARTSYS;
727 } 676 }
728 677
@@ -730,18 +679,18 @@ static int dvb_dmxdev_filter_free(struct dmxdev *dmxdev, struct dmxdev_filter *d
730 dvb_dmxdev_filter_reset(dmxdevfilter); 679 dvb_dmxdev_filter_reset(dmxdevfilter);
731 680
732 if (dmxdevfilter->buffer.data) { 681 if (dmxdevfilter->buffer.data) {
733 void *mem=dmxdevfilter->buffer.data; 682 void *mem = dmxdevfilter->buffer.data;
734 683
735 spin_lock_irq(&dmxdev->lock); 684 spin_lock_irq(&dmxdev->lock);
736 dmxdevfilter->buffer.data=NULL; 685 dmxdevfilter->buffer.data = NULL;
737 spin_unlock_irq(&dmxdev->lock); 686 spin_unlock_irq(&dmxdev->lock);
738 vfree(mem); 687 vfree(mem);
739 } 688 }
740 689
741 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_FREE); 690 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_FREE);
742 wake_up(&dmxdevfilter->buffer.queue); 691 wake_up(&dmxdevfilter->buffer.queue);
743 up(&dmxdevfilter->mutex); 692 mutex_unlock(&dmxdevfilter->mutex);
744 up(&dmxdev->mutex); 693 mutex_unlock(&dmxdev->mutex);
745 return 0; 694 return 0;
746} 695}
747 696
@@ -749,173 +698,171 @@ static inline void invert_mode(dmx_filter_t *filter)
749{ 698{
750 int i; 699 int i;
751 700
752 for (i=0; i<DMX_FILTER_SIZE; i++) 701 for (i = 0; i < DMX_FILTER_SIZE; i++)
753 filter->mode[i]^=0xff; 702 filter->mode[i] ^= 0xff;
754} 703}
755 704
756
757static int dvb_dmxdev_filter_set(struct dmxdev *dmxdev, 705static int dvb_dmxdev_filter_set(struct dmxdev *dmxdev,
758 struct dmxdev_filter *dmxdevfilter, 706 struct dmxdev_filter *dmxdevfilter,
759 struct dmx_sct_filter_params *params) 707 struct dmx_sct_filter_params *params)
760{ 708{
761 dprintk ("function : %s\n", __FUNCTION__); 709 dprintk("function : %s\n", __FUNCTION__);
762 710
763 dvb_dmxdev_filter_stop(dmxdevfilter); 711 dvb_dmxdev_filter_stop(dmxdevfilter);
764 712
765 dmxdevfilter->type=DMXDEV_TYPE_SEC; 713 dmxdevfilter->type = DMXDEV_TYPE_SEC;
766 dmxdevfilter->pid=params->pid;
767 memcpy(&dmxdevfilter->params.sec, 714 memcpy(&dmxdevfilter->params.sec,
768 params, sizeof(struct dmx_sct_filter_params)); 715 params, sizeof(struct dmx_sct_filter_params));
769 invert_mode(&dmxdevfilter->params.sec.filter); 716 invert_mode(&dmxdevfilter->params.sec.filter);
770 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET); 717 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);
771 718
772 if (params->flags&DMX_IMMEDIATE_START) 719 if (params->flags & DMX_IMMEDIATE_START)
773 return dvb_dmxdev_filter_start(dmxdevfilter); 720 return dvb_dmxdev_filter_start(dmxdevfilter);
774 721
775 return 0; 722 return 0;
776} 723}
777 724
778static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev, 725static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev,
779 struct dmxdev_filter *dmxdevfilter, 726 struct dmxdev_filter *dmxdevfilter,
780 struct dmx_pes_filter_params *params) 727 struct dmx_pes_filter_params *params)
781{ 728{
782 dvb_dmxdev_filter_stop(dmxdevfilter); 729 dvb_dmxdev_filter_stop(dmxdevfilter);
783 730
784 if (params->pes_type>DMX_PES_OTHER || params->pes_type<0) 731 if (params->pes_type > DMX_PES_OTHER || params->pes_type < 0)
785 return -EINVAL; 732 return -EINVAL;
786 733
787 dmxdevfilter->type=DMXDEV_TYPE_PES; 734 dmxdevfilter->type = DMXDEV_TYPE_PES;
788 dmxdevfilter->pid=params->pid; 735 memcpy(&dmxdevfilter->params, params,
789 memcpy(&dmxdevfilter->params, params, sizeof(struct dmx_pes_filter_params)); 736 sizeof(struct dmx_pes_filter_params));
790 737
791 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET); 738 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);
792 739
793 if (params->flags&DMX_IMMEDIATE_START) 740 if (params->flags & DMX_IMMEDIATE_START)
794 return dvb_dmxdev_filter_start(dmxdevfilter); 741 return dvb_dmxdev_filter_start(dmxdevfilter);
795 742
796 return 0; 743 return 0;
797} 744}
798 745
799static ssize_t dvb_dmxdev_read_sec(struct dmxdev_filter *dfil, 746static ssize_t dvb_dmxdev_read_sec(struct dmxdev_filter *dfil,
800 struct file *file, char __user *buf, size_t count, loff_t *ppos) 747 struct file *file, char __user *buf,
748 size_t count, loff_t *ppos)
801{ 749{
802 int result, hcount; 750 int result, hcount;
803 int done=0; 751 int done = 0;
804 752
805 if (dfil->todo<=0) { 753 if (dfil->todo <= 0) {
806 hcount=3+dfil->todo; 754 hcount = 3 + dfil->todo;
807 if (hcount>count) 755 if (hcount > count)
808 hcount=count; 756 hcount = count;
809 result=dvb_dmxdev_buffer_read(&dfil->buffer, file->f_flags&O_NONBLOCK, 757 result = dvb_dmxdev_buffer_read(&dfil->buffer,
810 buf, hcount, ppos); 758 file->f_flags & O_NONBLOCK,
811 if (result<0) { 759 buf, hcount, ppos);
812 dfil->todo=0; 760 if (result < 0) {
761 dfil->todo = 0;
813 return result; 762 return result;
814 } 763 }
815 if (copy_from_user(dfil->secheader-dfil->todo, buf, result)) 764 if (copy_from_user(dfil->secheader - dfil->todo, buf, result))
816 return -EFAULT; 765 return -EFAULT;
817 buf+=result; 766 buf += result;
818 done=result; 767 done = result;
819 count-=result; 768 count -= result;
820 dfil->todo-=result; 769 dfil->todo -= result;
821 if (dfil->todo>-3) 770 if (dfil->todo > -3)
822 return done; 771 return done;
823 dfil->todo=((dfil->secheader[1]<<8)|dfil->secheader[2])&0xfff; 772 dfil->todo = ((dfil->secheader[1] << 8) | dfil->secheader[2]) & 0xfff;
824 if (!count) 773 if (!count)
825 return done; 774 return done;
826 } 775 }
827 if (count>dfil->todo) 776 if (count > dfil->todo)
828 count=dfil->todo; 777 count = dfil->todo;
829 result=dvb_dmxdev_buffer_read(&dfil->buffer, file->f_flags&O_NONBLOCK, 778 result = dvb_dmxdev_buffer_read(&dfil->buffer,
830 buf, count, ppos); 779 file->f_flags & O_NONBLOCK,
831 if (result<0) 780 buf, count, ppos);
781 if (result < 0)
832 return result; 782 return result;
833 dfil->todo-=result; 783 dfil->todo -= result;
834 return (result+done); 784 return (result + done);
835} 785}
836 786
837
838static ssize_t 787static ssize_t
839dvb_demux_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) 788dvb_demux_read(struct file *file, char __user *buf, size_t count,
789 loff_t *ppos)
840{ 790{
841 struct dmxdev_filter *dmxdevfilter= file->private_data; 791 struct dmxdev_filter *dmxdevfilter = file->private_data;
842 int ret=0; 792 int ret;
843 793
844 if (down_interruptible(&dmxdevfilter->mutex)) 794 if (mutex_lock_interruptible(&dmxdevfilter->mutex))
845 return -ERESTARTSYS; 795 return -ERESTARTSYS;
846 796
847 if (dmxdevfilter->type==DMXDEV_TYPE_SEC) 797 if (dmxdevfilter->type == DMXDEV_TYPE_SEC)
848 ret=dvb_dmxdev_read_sec(dmxdevfilter, file, buf, count, ppos); 798 ret = dvb_dmxdev_read_sec(dmxdevfilter, file, buf, count, ppos);
849 else 799 else
850 ret=dvb_dmxdev_buffer_read(&dmxdevfilter->buffer, 800 ret = dvb_dmxdev_buffer_read(&dmxdevfilter->buffer,
851 file->f_flags&O_NONBLOCK, 801 file->f_flags & O_NONBLOCK,
852 buf, count, ppos); 802 buf, count, ppos);
853 803
854 up(&dmxdevfilter->mutex); 804 mutex_unlock(&dmxdevfilter->mutex);
855 return ret; 805 return ret;
856} 806}
857 807
858
859static int dvb_demux_do_ioctl(struct inode *inode, struct file *file, 808static int dvb_demux_do_ioctl(struct inode *inode, struct file *file,
860 unsigned int cmd, void *parg) 809 unsigned int cmd, void *parg)
861{ 810{
862 struct dmxdev_filter *dmxdevfilter = file->private_data; 811 struct dmxdev_filter *dmxdevfilter = file->private_data;
863 struct dmxdev *dmxdev=dmxdevfilter->dev; 812 struct dmxdev *dmxdev = dmxdevfilter->dev;
864 unsigned long arg=(unsigned long) parg; 813 unsigned long arg = (unsigned long)parg;
865 int ret=0; 814 int ret = 0;
866 815
867 if (down_interruptible (&dmxdev->mutex)) 816 if (mutex_lock_interruptible(&dmxdev->mutex))
868 return -ERESTARTSYS; 817 return -ERESTARTSYS;
869 818
870 switch (cmd) { 819 switch (cmd) {
871 case DMX_START: 820 case DMX_START:
872 if (down_interruptible(&dmxdevfilter->mutex)) { 821 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
873 up(&dmxdev->mutex); 822 mutex_unlock(&dmxdev->mutex);
874 return -ERESTARTSYS; 823 return -ERESTARTSYS;
875 } 824 }
876 if (dmxdevfilter->state<DMXDEV_STATE_SET) 825 if (dmxdevfilter->state < DMXDEV_STATE_SET)
877 ret = -EINVAL; 826 ret = -EINVAL;
878 else 827 else
879 ret = dvb_dmxdev_filter_start(dmxdevfilter); 828 ret = dvb_dmxdev_filter_start(dmxdevfilter);
880 up(&dmxdevfilter->mutex); 829 mutex_unlock(&dmxdevfilter->mutex);
881 break; 830 break;
882 831
883 case DMX_STOP: 832 case DMX_STOP:
884 if (down_interruptible(&dmxdevfilter->mutex)) { 833 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
885 up(&dmxdev->mutex); 834 mutex_unlock(&dmxdev->mutex);
886 return -ERESTARTSYS; 835 return -ERESTARTSYS;
887 } 836 }
888 ret=dvb_dmxdev_filter_stop(dmxdevfilter); 837 ret = dvb_dmxdev_filter_stop(dmxdevfilter);
889 up(&dmxdevfilter->mutex); 838 mutex_unlock(&dmxdevfilter->mutex);
890 break; 839 break;
891 840
892 case DMX_SET_FILTER: 841 case DMX_SET_FILTER:
893 if (down_interruptible(&dmxdevfilter->mutex)) { 842 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
894 up(&dmxdev->mutex); 843 mutex_unlock(&dmxdev->mutex);
895 return -ERESTARTSYS; 844 return -ERESTARTSYS;
896 } 845 }
897 ret = dvb_dmxdev_filter_set(dmxdev, dmxdevfilter, 846 ret = dvb_dmxdev_filter_set(dmxdev, dmxdevfilter, parg);
898 (struct dmx_sct_filter_params *)parg); 847 mutex_unlock(&dmxdevfilter->mutex);
899 up(&dmxdevfilter->mutex);
900 break; 848 break;
901 849
902 case DMX_SET_PES_FILTER: 850 case DMX_SET_PES_FILTER:
903 if (down_interruptible(&dmxdevfilter->mutex)) { 851 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
904 up(&dmxdev->mutex); 852 mutex_unlock(&dmxdev->mutex);
905 return -ERESTARTSYS; 853 return -ERESTARTSYS;
906 } 854 }
907 ret=dvb_dmxdev_pes_filter_set(dmxdev, dmxdevfilter, 855 ret = dvb_dmxdev_pes_filter_set(dmxdev, dmxdevfilter, parg);
908 (struct dmx_pes_filter_params *)parg); 856 mutex_unlock(&dmxdevfilter->mutex);
909 up(&dmxdevfilter->mutex);
910 break; 857 break;
911 858
912 case DMX_SET_BUFFER_SIZE: 859 case DMX_SET_BUFFER_SIZE:
913 if (down_interruptible(&dmxdevfilter->mutex)) { 860 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
914 up(&dmxdev->mutex); 861 mutex_unlock(&dmxdev->mutex);
915 return -ERESTARTSYS; 862 return -ERESTARTSYS;
916 } 863 }
917 ret=dvb_dmxdev_set_buffer_size(dmxdevfilter, arg); 864 ret = dvb_dmxdev_set_buffer_size(dmxdevfilter, arg);
918 up(&dmxdevfilter->mutex); 865 mutex_unlock(&dmxdevfilter->mutex);
919 break; 866 break;
920 867
921 case DMX_GET_EVENT: 868 case DMX_GET_EVENT:
@@ -923,10 +870,10 @@ static int dvb_demux_do_ioctl(struct inode *inode, struct file *file,
923 870
924 case DMX_GET_PES_PIDS: 871 case DMX_GET_PES_PIDS:
925 if (!dmxdev->demux->get_pes_pids) { 872 if (!dmxdev->demux->get_pes_pids) {
926 ret=-EINVAL; 873 ret = -EINVAL;
927 break; 874 break;
928 } 875 }
929 dmxdev->demux->get_pes_pids(dmxdev->demux, (u16 *)parg); 876 dmxdev->demux->get_pes_pids(dmxdev->demux, parg);
930 break; 877 break;
931 878
932 case DMX_GET_CAPS: 879 case DMX_GET_CAPS:
@@ -947,19 +894,20 @@ static int dvb_demux_do_ioctl(struct inode *inode, struct file *file,
947 894
948 case DMX_GET_STC: 895 case DMX_GET_STC:
949 if (!dmxdev->demux->get_stc) { 896 if (!dmxdev->demux->get_stc) {
950 ret=-EINVAL; 897 ret = -EINVAL;
951 break; 898 break;
952 } 899 }
953 ret = dmxdev->demux->get_stc(dmxdev->demux, 900 ret = dmxdev->demux->get_stc(dmxdev->demux,
954 ((struct dmx_stc *)parg)->num, 901 ((struct dmx_stc *)parg)->num,
955 &((struct dmx_stc *)parg)->stc, 902 &((struct dmx_stc *)parg)->stc,
956 &((struct dmx_stc *)parg)->base); 903 &((struct dmx_stc *)parg)->base);
957 break; 904 break;
958 905
959 default: 906 default:
960 ret=-EINVAL; 907 ret = -EINVAL;
908 break;
961 } 909 }
962 up(&dmxdev->mutex); 910 mutex_unlock(&dmxdev->mutex);
963 return ret; 911 return ret;
964} 912}
965 913
@@ -969,8 +917,7 @@ static int dvb_demux_ioctl(struct inode *inode, struct file *file,
969 return dvb_usercopy(inode, file, cmd, arg, dvb_demux_do_ioctl); 917 return dvb_usercopy(inode, file, cmd, arg, dvb_demux_do_ioctl);
970} 918}
971 919
972 920static unsigned int dvb_demux_poll(struct file *file, poll_table *wait)
973static unsigned int dvb_demux_poll (struct file *file, poll_table *wait)
974{ 921{
975 struct dmxdev_filter *dmxdevfilter = file->private_data; 922 struct dmxdev_filter *dmxdevfilter = file->private_data;
976 unsigned int mask = 0; 923 unsigned int mask = 0;
@@ -988,13 +935,12 @@ static unsigned int dvb_demux_poll (struct file *file, poll_table *wait)
988 if (dmxdevfilter->buffer.error) 935 if (dmxdevfilter->buffer.error)
989 mask |= (POLLIN | POLLRDNORM | POLLPRI | POLLERR); 936 mask |= (POLLIN | POLLRDNORM | POLLPRI | POLLERR);
990 937
991 if (dmxdevfilter->buffer.pread != dmxdevfilter->buffer.pwrite) 938 if (!dvb_ringbuffer_empty(&dmxdevfilter->buffer))
992 mask |= (POLLIN | POLLRDNORM | POLLPRI); 939 mask |= (POLLIN | POLLRDNORM | POLLPRI);
993 940
994 return mask; 941 return mask;
995} 942}
996 943
997
998static int dvb_demux_release(struct inode *inode, struct file *file) 944static int dvb_demux_release(struct inode *inode, struct file *file)
999{ 945{
1000 struct dmxdev_filter *dmxdevfilter = file->private_data; 946 struct dmxdev_filter *dmxdevfilter = file->private_data;
@@ -1003,72 +949,67 @@ static int dvb_demux_release(struct inode *inode, struct file *file)
1003 return dvb_dmxdev_filter_free(dmxdev, dmxdevfilter); 949 return dvb_dmxdev_filter_free(dmxdev, dmxdevfilter);
1004} 950}
1005 951
1006
1007static struct file_operations dvb_demux_fops = { 952static struct file_operations dvb_demux_fops = {
1008 .owner = THIS_MODULE, 953 .owner = THIS_MODULE,
1009 .read = dvb_demux_read, 954 .read = dvb_demux_read,
1010 .ioctl = dvb_demux_ioctl, 955 .ioctl = dvb_demux_ioctl,
1011 .open = dvb_demux_open, 956 .open = dvb_demux_open,
1012 .release = dvb_demux_release, 957 .release = dvb_demux_release,
1013 .poll = dvb_demux_poll, 958 .poll = dvb_demux_poll,
1014}; 959};
1015 960
1016
1017static struct dvb_device dvbdev_demux = { 961static struct dvb_device dvbdev_demux = {
1018 .priv = NULL, 962 .priv = NULL,
1019 .users = 1, 963 .users = 1,
1020 .writers = 1, 964 .writers = 1,
1021 .fops = &dvb_demux_fops 965 .fops = &dvb_demux_fops
1022}; 966};
1023 967
1024
1025static int dvb_dvr_do_ioctl(struct inode *inode, struct file *file, 968static int dvb_dvr_do_ioctl(struct inode *inode, struct file *file,
1026 unsigned int cmd, void *parg) 969 unsigned int cmd, void *parg)
1027{ 970{
1028 struct dvb_device *dvbdev = file->private_data; 971 struct dvb_device *dvbdev = file->private_data;
1029 struct dmxdev *dmxdev = dvbdev->priv; 972 struct dmxdev *dmxdev = dvbdev->priv;
973 int ret;
1030 974
1031 int ret=0; 975 if (mutex_lock_interruptible(&dmxdev->mutex))
1032
1033 if (down_interruptible (&dmxdev->mutex))
1034 return -ERESTARTSYS; 976 return -ERESTARTSYS;
1035 977
1036 switch (cmd) { 978 switch (cmd) {
1037 case DMX_SET_BUFFER_SIZE: 979 case DMX_SET_BUFFER_SIZE:
1038 // FIXME: implement 980 // FIXME: implement
1039 ret=0; 981 ret = 0;
1040 break; 982 break;
1041 983
1042 default: 984 default:
1043 ret=-EINVAL; 985 ret = -EINVAL;
986 break;
1044 } 987 }
1045 up(&dmxdev->mutex); 988 mutex_unlock(&dmxdev->mutex);
1046 return ret; 989 return ret;
1047} 990}
1048 991
1049
1050static int dvb_dvr_ioctl(struct inode *inode, struct file *file, 992static int dvb_dvr_ioctl(struct inode *inode, struct file *file,
1051 unsigned int cmd, unsigned long arg) 993 unsigned int cmd, unsigned long arg)
1052{ 994{
1053 return dvb_usercopy(inode, file, cmd, arg, dvb_dvr_do_ioctl); 995 return dvb_usercopy(inode, file, cmd, arg, dvb_dvr_do_ioctl);
1054} 996}
1055 997
1056 998static unsigned int dvb_dvr_poll(struct file *file, poll_table *wait)
1057static unsigned int dvb_dvr_poll (struct file *file, poll_table *wait)
1058{ 999{
1059 struct dvb_device *dvbdev = file->private_data; 1000 struct dvb_device *dvbdev = file->private_data;
1060 struct dmxdev *dmxdev = dvbdev->priv; 1001 struct dmxdev *dmxdev = dvbdev->priv;
1061 unsigned int mask = 0; 1002 unsigned int mask = 0;
1062 1003
1063 dprintk ("function : %s\n", __FUNCTION__); 1004 dprintk("function : %s\n", __FUNCTION__);
1064 1005
1065 poll_wait(file, &dmxdev->dvr_buffer.queue, wait); 1006 poll_wait(file, &dmxdev->dvr_buffer.queue, wait);
1066 1007
1067 if ((file->f_flags&O_ACCMODE) == O_RDONLY) { 1008 if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
1068 if (dmxdev->dvr_buffer.error) 1009 if (dmxdev->dvr_buffer.error)
1069 mask |= (POLLIN | POLLRDNORM | POLLPRI | POLLERR); 1010 mask |= (POLLIN | POLLRDNORM | POLLPRI | POLLERR);
1070 1011
1071 if (dmxdev->dvr_buffer.pread!=dmxdev->dvr_buffer.pwrite) 1012 if (!dvb_ringbuffer_empty(&dmxdev->dvr_buffer))
1072 mask |= (POLLIN | POLLRDNORM | POLLPRI); 1013 mask |= (POLLIN | POLLRDNORM | POLLPRI);
1073 } else 1014 } else
1074 mask |= (POLLOUT | POLLWRNORM | POLLPRI); 1015 mask |= (POLLOUT | POLLWRNORM | POLLPRI);
@@ -1076,73 +1017,63 @@ static unsigned int dvb_dvr_poll (struct file *file, poll_table *wait)
1076 return mask; 1017 return mask;
1077} 1018}
1078 1019
1079
1080static struct file_operations dvb_dvr_fops = { 1020static struct file_operations dvb_dvr_fops = {
1081 .owner = THIS_MODULE, 1021 .owner = THIS_MODULE,
1082 .read = dvb_dvr_read, 1022 .read = dvb_dvr_read,
1083 .write = dvb_dvr_write, 1023 .write = dvb_dvr_write,
1084 .ioctl = dvb_dvr_ioctl, 1024 .ioctl = dvb_dvr_ioctl,
1085 .open = dvb_dvr_open, 1025 .open = dvb_dvr_open,
1086 .release = dvb_dvr_release, 1026 .release = dvb_dvr_release,
1087 .poll = dvb_dvr_poll, 1027 .poll = dvb_dvr_poll,
1088}; 1028};
1089 1029
1090static struct dvb_device dvbdev_dvr = { 1030static struct dvb_device dvbdev_dvr = {
1091 .priv = NULL, 1031 .priv = NULL,
1092 .users = 1, 1032 .users = 1,
1093 .writers = 1, 1033 .writers = 1,
1094 .fops = &dvb_dvr_fops 1034 .fops = &dvb_dvr_fops
1095}; 1035};
1096 1036
1097int 1037int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *dvb_adapter)
1098dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *dvb_adapter)
1099{ 1038{
1100 int i; 1039 int i;
1101 1040
1102 if (dmxdev->demux->open(dmxdev->demux) < 0) 1041 if (dmxdev->demux->open(dmxdev->demux) < 0)
1103 return -EUSERS; 1042 return -EUSERS;
1104 1043
1105 dmxdev->filter = vmalloc(dmxdev->filternum*sizeof(struct dmxdev_filter)); 1044 dmxdev->filter = vmalloc(dmxdev->filternum * sizeof(struct dmxdev_filter));
1106 if (!dmxdev->filter) 1045 if (!dmxdev->filter)
1107 return -ENOMEM; 1046 return -ENOMEM;
1108 1047
1109 dmxdev->dvr = vmalloc(dmxdev->filternum*sizeof(struct dmxdev_dvr)); 1048 mutex_init(&dmxdev->mutex);
1110 if (!dmxdev->dvr) {
1111 vfree(dmxdev->filter);
1112 dmxdev->filter = NULL;
1113 return -ENOMEM;
1114 }
1115
1116 sema_init(&dmxdev->mutex, 1);
1117 spin_lock_init(&dmxdev->lock); 1049 spin_lock_init(&dmxdev->lock);
1118 for (i=0; i<dmxdev->filternum; i++) { 1050 for (i = 0; i < dmxdev->filternum; i++) {
1119 dmxdev->filter[i].dev=dmxdev; 1051 dmxdev->filter[i].dev = dmxdev;
1120 dmxdev->filter[i].buffer.data=NULL; 1052 dmxdev->filter[i].buffer.data = NULL;
1121 dvb_dmxdev_filter_state_set(&dmxdev->filter[i], DMXDEV_STATE_FREE); 1053 dvb_dmxdev_filter_state_set(&dmxdev->filter[i],
1122 dmxdev->dvr[i].dev=dmxdev; 1054 DMXDEV_STATE_FREE);
1123 dmxdev->dvr[i].buffer.data=NULL;
1124 dvb_dmxdev_dvr_state_set(&dmxdev->dvr[i], DMXDEV_STATE_FREE);
1125 } 1055 }
1126 1056
1127 dvb_register_device(dvb_adapter, &dmxdev->dvbdev, &dvbdev_demux, dmxdev, DVB_DEVICE_DEMUX); 1057 dvb_register_device(dvb_adapter, &dmxdev->dvbdev, &dvbdev_demux, dmxdev,
1128 dvb_register_device(dvb_adapter, &dmxdev->dvr_dvbdev, &dvbdev_dvr, dmxdev, DVB_DEVICE_DVR); 1058 DVB_DEVICE_DEMUX);
1059 dvb_register_device(dvb_adapter, &dmxdev->dvr_dvbdev, &dvbdev_dvr,
1060 dmxdev, DVB_DEVICE_DVR);
1129 1061
1130 dvb_dmxdev_buffer_init(&dmxdev->dvr_buffer); 1062 dvb_ringbuffer_init(&dmxdev->dvr_buffer, NULL, 8192);
1131 1063
1132 return 0; 1064 return 0;
1133} 1065}
1066
1134EXPORT_SYMBOL(dvb_dmxdev_init); 1067EXPORT_SYMBOL(dvb_dmxdev_init);
1135 1068
1136void 1069void dvb_dmxdev_release(struct dmxdev *dmxdev)
1137dvb_dmxdev_release(struct dmxdev *dmxdev)
1138{ 1070{
1139 dvb_unregister_device(dmxdev->dvbdev); 1071 dvb_unregister_device(dmxdev->dvbdev);
1140 dvb_unregister_device(dmxdev->dvr_dvbdev); 1072 dvb_unregister_device(dmxdev->dvr_dvbdev);
1141 1073
1142 vfree(dmxdev->filter); 1074 vfree(dmxdev->filter);
1143 dmxdev->filter=NULL; 1075 dmxdev->filter = NULL;
1144 vfree(dmxdev->dvr);
1145 dmxdev->dvr=NULL;
1146 dmxdev->demux->close(dmxdev->demux); 1076 dmxdev->demux->close(dmxdev->demux);
1147} 1077}
1078
1148EXPORT_SYMBOL(dvb_dmxdev_release); 1079EXPORT_SYMBOL(dvb_dmxdev_release);
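The dmxdev.c changes above boil down to two things: the driver's private struct dmxdev_buffer (open-coded pread/pwrite arithmetic with a split memcpy) is replaced by the shared dvb_ringbuffer helpers, and a write that does not fit now fails with -EOVERFLOW instead of -1 while the read loop returns whatever partial count was already copied. Below is a minimal, self-contained userspace sketch of that ring-buffer behaviour, so the overflow and partial-read semantics can be compiled and tested outside the kernel; the helper names mirror the dvb_ringbuffer API used in the patch, but the byte-by-byte copy loop and the 16-byte buffer are simplifications for illustration, not the kernel implementation.

    /*
     * Userspace model of the ring-buffer semantics the patch relies on:
     * read/write indices into a byte ring, with one slot kept unused so
     * that pread == pwrite unambiguously means "empty".
     */
    #include <errno.h>
    #include <stdio.h>

    struct ringbuf {
            unsigned char data[16];
            size_t size;            /* == sizeof(data) */
            size_t pread;
            size_t pwrite;
    };

    static void rb_init(struct ringbuf *rb)
    {
            rb->size = sizeof(rb->data);
            rb->pread = rb->pwrite = 0;
    }

    static size_t rb_free(const struct ringbuf *rb)
    {
            /* one slot stays unused so "full" and "empty" are distinguishable */
            return (rb->pread + rb->size - rb->pwrite - 1) % rb->size;
    }

    static size_t rb_avail(const struct ringbuf *rb)
    {
            return (rb->pwrite + rb->size - rb->pread) % rb->size;
    }

    static int rb_write(struct ringbuf *rb, const unsigned char *src, size_t len)
    {
            size_t i;

            if (len > rb_free(rb))
                    return -EOVERFLOW;      /* the patch propagates this instead of -1 */
            for (i = 0; i < len; i++) {
                    rb->data[rb->pwrite] = src[i];
                    rb->pwrite = (rb->pwrite + 1) % rb->size;
            }
            return (int)len;
    }

    static size_t rb_read(struct ringbuf *rb, unsigned char *dst, size_t len)
    {
            size_t n = rb_avail(rb), i;

            if (len < n)
                    n = len;                /* partial read, like the new read loop */
            for (i = 0; i < n; i++) {
                    dst[i] = rb->data[rb->pread];
                    rb->pread = (rb->pread + 1) % rb->size;
            }
            return n;
    }

    int main(void)
    {
            struct ringbuf rb;
            unsigned char out[16];
            int ret;

            rb_init(&rb);
            ret = rb_write(&rb, (const unsigned char *)"0123456789", 10);
            printf("write 10 -> %d, free %zu, avail %zu\n", ret, rb_free(&rb), rb_avail(&rb));
            ret = rb_write(&rb, (const unsigned char *)"ABCDEFGH", 8);     /* only 5 slots left */
            printf("write 8  -> %d (rejected whole, nothing written)\n", ret);
            printf("read     -> %zu bytes\n", rb_read(&rb, out, sizeof(out)));
            return 0;
    }

The real helpers copy in at most two memcpy chunks around the wrap point rather than byte by byte, and the trailing 1 in dvb_ringbuffer_read(src, buf, avail, 1) in the patch appears to select a copy-to-user-space path, which is why the read loop above is only an approximation of the kernel behaviour.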
diff --git a/drivers/media/dvb/dvb-core/dmxdev.h b/drivers/media/dvb/dvb-core/dmxdev.h
index fd72920c2199..d2bee9ffe43c 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.h
+++ b/drivers/media/dvb/dvb-core/dmxdev.h
@@ -30,14 +30,15 @@
30#include <linux/wait.h> 30#include <linux/wait.h>
31#include <linux/fs.h> 31#include <linux/fs.h>
32#include <linux/string.h> 32#include <linux/string.h>
33#include <asm/semaphore.h> 33#include <linux/mutex.h>
34 34
35#include <linux/dvb/dmx.h> 35#include <linux/dvb/dmx.h>
36 36
37#include "dvbdev.h" 37#include "dvbdev.h"
38#include "demux.h" 38#include "demux.h"
39#include "dvb_ringbuffer.h"
39 40
40enum dmxdevype { 41enum dmxdev_type {
41 DMXDEV_TYPE_NONE, 42 DMXDEV_TYPE_NONE,
42 DMXDEV_TYPE_SEC, 43 DMXDEV_TYPE_SEC,
43 DMXDEV_TYPE_PES, 44 DMXDEV_TYPE_PES,
@@ -52,18 +53,7 @@ enum dmxdev_state {
52 DMXDEV_STATE_TIMEDOUT 53 DMXDEV_STATE_TIMEDOUT
53}; 54};
54 55
55struct dmxdev_buffer {
56 u8 *data;
57 int size;
58 int pread;
59 int pwrite;
60 wait_queue_head_t queue;
61 int error;
62};
63
64struct dmxdev_filter { 56struct dmxdev_filter {
65 struct dvb_device *dvbdev;
66
67 union { 57 union {
68 struct dmx_section_filter *sec; 58 struct dmx_section_filter *sec;
69 } filter; 59 } filter;
@@ -78,26 +68,17 @@ struct dmxdev_filter {
78 struct dmx_pes_filter_params pes; 68 struct dmx_pes_filter_params pes;
79 } params; 69 } params;
80 70
81 int type; 71 enum dmxdev_type type;
82 enum dmxdev_state state; 72 enum dmxdev_state state;
83 struct dmxdev *dev; 73 struct dmxdev *dev;
84 struct dmxdev_buffer buffer; 74 struct dvb_ringbuffer buffer;
85 75
86 struct semaphore mutex; 76 struct mutex mutex;
87 77
88 /* only for sections */ 78 /* only for sections */
89 struct timer_list timer; 79 struct timer_list timer;
90 int todo; 80 int todo;
91 u8 secheader[3]; 81 u8 secheader[3];
92
93 u16 pid;
94};
95
96
97struct dmxdev_dvr {
98 int state;
99 struct dmxdev *dev;
100 struct dmxdev_buffer buffer;
101}; 82};
102 83
103 84
@@ -106,7 +87,6 @@ struct dmxdev {
106 struct dvb_device *dvr_dvbdev; 87 struct dvb_device *dvr_dvbdev;
107 88
108 struct dmxdev_filter *filter; 89 struct dmxdev_filter *filter;
109 struct dmxdev_dvr *dvr;
110 struct dmx_demux *demux; 90 struct dmx_demux *demux;
111 91
112 int filternum; 92 int filternum;
@@ -114,10 +94,10 @@ struct dmxdev {
114#define DMXDEV_CAP_DUPLEX 1 94#define DMXDEV_CAP_DUPLEX 1
115 struct dmx_frontend *dvr_orig_fe; 95 struct dmx_frontend *dvr_orig_fe;
116 96
117 struct dmxdev_buffer dvr_buffer; 97 struct dvb_ringbuffer dvr_buffer;
118#define DVR_BUFFER_SIZE (10*188*1024) 98#define DVR_BUFFER_SIZE (10*188*1024)
119 99
120 struct semaphore mutex; 100 struct mutex mutex;
121 spinlock_t lock; 101 spinlock_t lock;
122}; 102};
123 103
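Besides the ringbuffer switch, dmxdev.h drops the struct semaphore fields in favour of struct mutex (pulling in linux/mutex.h instead of asm/semaphore.h), and every down_interruptible()/up() pair in dmxdev.c and in the dvb_demux.c hunks below becomes mutex_lock_interruptible()/mutex_unlock(), with the lock still released on each early error return before -EINVAL, -EBUSY or -ENOMEM is propagated. The snippet below models that lock-on-entry/unlock-on-every-exit pattern in portable C with POSIX threads, since the kernel mutex API cannot be exercised in user space; demux_set_feed(), the feed_busy field and the error codes chosen here are invented purely for the illustration.

    /*
     * Userspace model of the locking pattern used throughout the patch:
     * take the lock at the top of the operation and release it on every
     * exit path, including the early error returns.  pthread_mutex_t
     * stands in for the kernel's struct mutex.
     */
    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    struct demux {
            pthread_mutex_t mutex;
            int feed_busy;
            int pid;
    };

    static int demux_set_feed(struct demux *d, int pid)
    {
            if (pthread_mutex_lock(&d->mutex) != 0)
                    return -EINTR;  /* rough analogue of mutex_lock_interruptible() failing */

            if (d->feed_busy) {
                    pthread_mutex_unlock(&d->mutex);        /* unlock on the error path */
                    return -EBUSY;
            }

            d->pid = pid;
            d->feed_busy = 1;

            pthread_mutex_unlock(&d->mutex);                /* unlock on the success path */
            return 0;
    }

    int main(void)
    {
            struct demux d = { .mutex = PTHREAD_MUTEX_INITIALIZER, .feed_busy = 0, .pid = -1 };

            printf("first set_feed:  %d\n", demux_set_feed(&d, 0x100));
            printf("second set_feed: %d (feed already busy)\n", demux_set_feed(&d, 0x200));
            return 0;
    }

In the kernel code the interruptible lock returns an error when a signal arrives while waiting, which is why every open, ioctl and feed path in the patch starts with "if (mutex_lock_interruptible(...)) return -ERESTARTSYS;" and why each subsequent failure branch unlocks before returning.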
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index b4c899b15959..83ec5e06c482 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -589,18 +589,18 @@ static int dmx_ts_feed_set(struct dmx_ts_feed *ts_feed, u16 pid, int ts_type,
589 if (pid > DMX_MAX_PID) 589 if (pid > DMX_MAX_PID)
590 return -EINVAL; 590 return -EINVAL;
591 591
592 if (down_interruptible(&demux->mutex)) 592 if (mutex_lock_interruptible(&demux->mutex))
593 return -ERESTARTSYS; 593 return -ERESTARTSYS;
594 594
595 if (ts_type & TS_DECODER) { 595 if (ts_type & TS_DECODER) {
596 if (pes_type >= DMX_TS_PES_OTHER) { 596 if (pes_type >= DMX_TS_PES_OTHER) {
597 up(&demux->mutex); 597 mutex_unlock(&demux->mutex);
598 return -EINVAL; 598 return -EINVAL;
599 } 599 }
600 600
601 if (demux->pesfilter[pes_type] && 601 if (demux->pesfilter[pes_type] &&
602 demux->pesfilter[pes_type] != feed) { 602 demux->pesfilter[pes_type] != feed) {
603 up(&demux->mutex); 603 mutex_unlock(&demux->mutex);
604 return -EINVAL; 604 return -EINVAL;
605 } 605 }
606 606
@@ -622,14 +622,14 @@ static int dmx_ts_feed_set(struct dmx_ts_feed *ts_feed, u16 pid, int ts_type,
622#else 622#else
623 feed->buffer = vmalloc(feed->buffer_size); 623 feed->buffer = vmalloc(feed->buffer_size);
624 if (!feed->buffer) { 624 if (!feed->buffer) {
625 up(&demux->mutex); 625 mutex_unlock(&demux->mutex);
626 return -ENOMEM; 626 return -ENOMEM;
627 } 627 }
628#endif 628#endif
629 } 629 }
630 630
631 feed->state = DMX_STATE_READY; 631 feed->state = DMX_STATE_READY;
632 up(&demux->mutex); 632 mutex_unlock(&demux->mutex);
633 633
634 return 0; 634 return 0;
635} 635}
@@ -640,21 +640,21 @@ static int dmx_ts_feed_start_filtering(struct dmx_ts_feed *ts_feed)
640 struct dvb_demux *demux = feed->demux; 640 struct dvb_demux *demux = feed->demux;
641 int ret; 641 int ret;
642 642
643 if (down_interruptible(&demux->mutex)) 643 if (mutex_lock_interruptible(&demux->mutex))
644 return -ERESTARTSYS; 644 return -ERESTARTSYS;
645 645
646 if (feed->state != DMX_STATE_READY || feed->type != DMX_TYPE_TS) { 646 if (feed->state != DMX_STATE_READY || feed->type != DMX_TYPE_TS) {
647 up(&demux->mutex); 647 mutex_unlock(&demux->mutex);
648 return -EINVAL; 648 return -EINVAL;
649 } 649 }
650 650
651 if (!demux->start_feed) { 651 if (!demux->start_feed) {
652 up(&demux->mutex); 652 mutex_unlock(&demux->mutex);
653 return -ENODEV; 653 return -ENODEV;
654 } 654 }
655 655
656 if ((ret = demux->start_feed(feed)) < 0) { 656 if ((ret = demux->start_feed(feed)) < 0) {
657 up(&demux->mutex); 657 mutex_unlock(&demux->mutex);
658 return ret; 658 return ret;
659 } 659 }
660 660
@@ -662,7 +662,7 @@ static int dmx_ts_feed_start_filtering(struct dmx_ts_feed *ts_feed)
662 ts_feed->is_filtering = 1; 662 ts_feed->is_filtering = 1;
663 feed->state = DMX_STATE_GO; 663 feed->state = DMX_STATE_GO;
664 spin_unlock_irq(&demux->lock); 664 spin_unlock_irq(&demux->lock);
665 up(&demux->mutex); 665 mutex_unlock(&demux->mutex);
666 666
667 return 0; 667 return 0;
668} 668}
@@ -673,16 +673,16 @@ static int dmx_ts_feed_stop_filtering(struct dmx_ts_feed *ts_feed)
673 struct dvb_demux *demux = feed->demux; 673 struct dvb_demux *demux = feed->demux;
674 int ret; 674 int ret;
675 675
676 if (down_interruptible(&demux->mutex)) 676 if (mutex_lock_interruptible(&demux->mutex))
677 return -ERESTARTSYS; 677 return -ERESTARTSYS;
678 678
679 if (feed->state < DMX_STATE_GO) { 679 if (feed->state < DMX_STATE_GO) {
680 up(&demux->mutex); 680 mutex_unlock(&demux->mutex);
681 return -EINVAL; 681 return -EINVAL;
682 } 682 }
683 683
684 if (!demux->stop_feed) { 684 if (!demux->stop_feed) {
685 up(&demux->mutex); 685 mutex_unlock(&demux->mutex);
686 return -ENODEV; 686 return -ENODEV;
687 } 687 }
688 688
@@ -692,7 +692,7 @@ static int dmx_ts_feed_stop_filtering(struct dmx_ts_feed *ts_feed)
692 ts_feed->is_filtering = 0; 692 ts_feed->is_filtering = 0;
693 feed->state = DMX_STATE_ALLOCATED; 693 feed->state = DMX_STATE_ALLOCATED;
694 spin_unlock_irq(&demux->lock); 694 spin_unlock_irq(&demux->lock);
695 up(&demux->mutex); 695 mutex_unlock(&demux->mutex);
696 696
697 return ret; 697 return ret;
698} 698}
@@ -704,11 +704,11 @@ static int dvbdmx_allocate_ts_feed(struct dmx_demux *dmx,
704 struct dvb_demux *demux = (struct dvb_demux *)dmx; 704 struct dvb_demux *demux = (struct dvb_demux *)dmx;
705 struct dvb_demux_feed *feed; 705 struct dvb_demux_feed *feed;
706 706
707 if (down_interruptible(&demux->mutex)) 707 if (mutex_lock_interruptible(&demux->mutex))
708 return -ERESTARTSYS; 708 return -ERESTARTSYS;
709 709
710 if (!(feed = dvb_dmx_feed_alloc(demux))) { 710 if (!(feed = dvb_dmx_feed_alloc(demux))) {
711 up(&demux->mutex); 711 mutex_unlock(&demux->mutex);
712 return -EBUSY; 712 return -EBUSY;
713 } 713 }
714 714
@@ -729,7 +729,7 @@ static int dvbdmx_allocate_ts_feed(struct dmx_demux *dmx,
729 729
730 if (!(feed->filter = dvb_dmx_filter_alloc(demux))) { 730 if (!(feed->filter = dvb_dmx_filter_alloc(demux))) {
731 feed->state = DMX_STATE_FREE; 731 feed->state = DMX_STATE_FREE;
732 up(&demux->mutex); 732 mutex_unlock(&demux->mutex);
733 return -EBUSY; 733 return -EBUSY;
734 } 734 }
735 735
@@ -737,7 +737,7 @@ static int dvbdmx_allocate_ts_feed(struct dmx_demux *dmx,
737 feed->filter->feed = feed; 737 feed->filter->feed = feed;
738 feed->filter->state = DMX_STATE_READY; 738 feed->filter->state = DMX_STATE_READY;
739 739
740 up(&demux->mutex); 740 mutex_unlock(&demux->mutex);
741 741
742 return 0; 742 return 0;
743} 743}
@@ -748,11 +748,11 @@ static int dvbdmx_release_ts_feed(struct dmx_demux *dmx,
748 struct dvb_demux *demux = (struct dvb_demux *)dmx; 748 struct dvb_demux *demux = (struct dvb_demux *)dmx;
749 struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed; 749 struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
750 750
751 if (down_interruptible(&demux->mutex)) 751 if (mutex_lock_interruptible(&demux->mutex))
752 return -ERESTARTSYS; 752 return -ERESTARTSYS;
753 753
754 if (feed->state == DMX_STATE_FREE) { 754 if (feed->state == DMX_STATE_FREE) {
755 up(&demux->mutex); 755 mutex_unlock(&demux->mutex);
756 return -EINVAL; 756 return -EINVAL;
757 } 757 }
758#ifndef NOBUFS 758#ifndef NOBUFS
@@ -770,7 +770,7 @@ static int dvbdmx_release_ts_feed(struct dmx_demux *dmx,
770 if (feed->ts_type & TS_DECODER && feed->pes_type < DMX_TS_PES_OTHER) 770 if (feed->ts_type & TS_DECODER && feed->pes_type < DMX_TS_PES_OTHER)
771 demux->pesfilter[feed->pes_type] = NULL; 771 demux->pesfilter[feed->pes_type] = NULL;
772 772
773 up(&demux->mutex); 773 mutex_unlock(&demux->mutex);
774 return 0; 774 return 0;
775} 775}
776 776
@@ -785,12 +785,12 @@ static int dmx_section_feed_allocate_filter(struct dmx_section_feed *feed,
785 struct dvb_demux *dvbdemux = dvbdmxfeed->demux; 785 struct dvb_demux *dvbdemux = dvbdmxfeed->demux;
786 struct dvb_demux_filter *dvbdmxfilter; 786 struct dvb_demux_filter *dvbdmxfilter;
787 787
788 if (down_interruptible(&dvbdemux->mutex)) 788 if (mutex_lock_interruptible(&dvbdemux->mutex))
789 return -ERESTARTSYS; 789 return -ERESTARTSYS;
790 790
791 dvbdmxfilter = dvb_dmx_filter_alloc(dvbdemux); 791 dvbdmxfilter = dvb_dmx_filter_alloc(dvbdemux);
792 if (!dvbdmxfilter) { 792 if (!dvbdmxfilter) {
793 up(&dvbdemux->mutex); 793 mutex_unlock(&dvbdemux->mutex);
794 return -EBUSY; 794 return -EBUSY;
795 } 795 }
796 796
@@ -805,7 +805,7 @@ static int dmx_section_feed_allocate_filter(struct dmx_section_feed *feed,
805 dvbdmxfeed->filter = dvbdmxfilter; 805 dvbdmxfeed->filter = dvbdmxfilter;
806 spin_unlock_irq(&dvbdemux->lock); 806 spin_unlock_irq(&dvbdemux->lock);
807 807
808 up(&dvbdemux->mutex); 808 mutex_unlock(&dvbdemux->mutex);
809 return 0; 809 return 0;
810} 810}
811 811
@@ -819,7 +819,7 @@ static int dmx_section_feed_set(struct dmx_section_feed *feed,
819 if (pid > 0x1fff) 819 if (pid > 0x1fff)
820 return -EINVAL; 820 return -EINVAL;
821 821
822 if (down_interruptible(&dvbdmx->mutex)) 822 if (mutex_lock_interruptible(&dvbdmx->mutex))
823 return -ERESTARTSYS; 823 return -ERESTARTSYS;
824 824
825 dvb_demux_feed_add(dvbdmxfeed); 825 dvb_demux_feed_add(dvbdmxfeed);
@@ -833,13 +833,13 @@ static int dmx_section_feed_set(struct dmx_section_feed *feed,
833#else 833#else
834 dvbdmxfeed->buffer = vmalloc(dvbdmxfeed->buffer_size); 834 dvbdmxfeed->buffer = vmalloc(dvbdmxfeed->buffer_size);
835 if (!dvbdmxfeed->buffer) { 835 if (!dvbdmxfeed->buffer) {
836 up(&dvbdmx->mutex); 836 mutex_unlock(&dvbdmx->mutex);
837 return -ENOMEM; 837 return -ENOMEM;
838 } 838 }
839#endif 839#endif
840 840
841 dvbdmxfeed->state = DMX_STATE_READY; 841 dvbdmxfeed->state = DMX_STATE_READY;
842 up(&dvbdmx->mutex); 842 mutex_unlock(&dvbdmx->mutex);
843 return 0; 843 return 0;
844} 844}
845 845
@@ -871,16 +871,16 @@ static int dmx_section_feed_start_filtering(struct dmx_section_feed *feed)
871 struct dvb_demux *dvbdmx = dvbdmxfeed->demux; 871 struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
872 int ret; 872 int ret;
873 873
874 if (down_interruptible(&dvbdmx->mutex)) 874 if (mutex_lock_interruptible(&dvbdmx->mutex))
875 return -ERESTARTSYS; 875 return -ERESTARTSYS;
876 876
877 if (feed->is_filtering) { 877 if (feed->is_filtering) {
878 up(&dvbdmx->mutex); 878 mutex_unlock(&dvbdmx->mutex);
879 return -EBUSY; 879 return -EBUSY;
880 } 880 }
881 881
882 if (!dvbdmxfeed->filter) { 882 if (!dvbdmxfeed->filter) {
883 up(&dvbdmx->mutex); 883 mutex_unlock(&dvbdmx->mutex);
884 return -EINVAL; 884 return -EINVAL;
885 } 885 }
886 886
@@ -890,14 +890,14 @@ static int dmx_section_feed_start_filtering(struct dmx_section_feed *feed)
890 dvbdmxfeed->feed.sec.seclen = 0; 890 dvbdmxfeed->feed.sec.seclen = 0;
891 891
892 if (!dvbdmx->start_feed) { 892 if (!dvbdmx->start_feed) {
893 up(&dvbdmx->mutex); 893 mutex_unlock(&dvbdmx->mutex);
894 return -ENODEV; 894 return -ENODEV;
895 } 895 }
896 896
897 prepare_secfilters(dvbdmxfeed); 897 prepare_secfilters(dvbdmxfeed);
898 898
899 if ((ret = dvbdmx->start_feed(dvbdmxfeed)) < 0) { 899 if ((ret = dvbdmx->start_feed(dvbdmxfeed)) < 0) {
900 up(&dvbdmx->mutex); 900 mutex_unlock(&dvbdmx->mutex);
901 return ret; 901 return ret;
902 } 902 }
903 903
@@ -906,7 +906,7 @@ static int dmx_section_feed_start_filtering(struct dmx_section_feed *feed)
906 dvbdmxfeed->state = DMX_STATE_GO; 906 dvbdmxfeed->state = DMX_STATE_GO;
907 spin_unlock_irq(&dvbdmx->lock); 907 spin_unlock_irq(&dvbdmx->lock);
908 908
909 up(&dvbdmx->mutex); 909 mutex_unlock(&dvbdmx->mutex);
910 return 0; 910 return 0;
911} 911}
912 912
@@ -916,11 +916,11 @@ static int dmx_section_feed_stop_filtering(struct dmx_section_feed *feed)
916 struct dvb_demux *dvbdmx = dvbdmxfeed->demux; 916 struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
917 int ret; 917 int ret;
918 918
919 if (down_interruptible(&dvbdmx->mutex)) 919 if (mutex_lock_interruptible(&dvbdmx->mutex))
920 return -ERESTARTSYS; 920 return -ERESTARTSYS;
921 921
922 if (!dvbdmx->stop_feed) { 922 if (!dvbdmx->stop_feed) {
923 up(&dvbdmx->mutex); 923 mutex_unlock(&dvbdmx->mutex);
924 return -ENODEV; 924 return -ENODEV;
925 } 925 }
926 926
@@ -931,7 +931,7 @@ static int dmx_section_feed_stop_filtering(struct dmx_section_feed *feed)
931 feed->is_filtering = 0; 931 feed->is_filtering = 0;
932 spin_unlock_irq(&dvbdmx->lock); 932 spin_unlock_irq(&dvbdmx->lock);
933 933
934 up(&dvbdmx->mutex); 934 mutex_unlock(&dvbdmx->mutex);
935 return ret; 935 return ret;
936} 936}
937 937
@@ -942,11 +942,11 @@ static int dmx_section_feed_release_filter(struct dmx_section_feed *feed,
942 struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed; 942 struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed;
943 struct dvb_demux *dvbdmx = dvbdmxfeed->demux; 943 struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
944 944
945 if (down_interruptible(&dvbdmx->mutex)) 945 if (mutex_lock_interruptible(&dvbdmx->mutex))
946 return -ERESTARTSYS; 946 return -ERESTARTSYS;
947 947
948 if (dvbdmxfilter->feed != dvbdmxfeed) { 948 if (dvbdmxfilter->feed != dvbdmxfeed) {
949 up(&dvbdmx->mutex); 949 mutex_unlock(&dvbdmx->mutex);
950 return -EINVAL; 950 return -EINVAL;
951 } 951 }
952 952
@@ -966,7 +966,7 @@ static int dmx_section_feed_release_filter(struct dmx_section_feed *feed,
966 966
967 dvbdmxfilter->state = DMX_STATE_FREE; 967 dvbdmxfilter->state = DMX_STATE_FREE;
968 spin_unlock_irq(&dvbdmx->lock); 968 spin_unlock_irq(&dvbdmx->lock);
969 up(&dvbdmx->mutex); 969 mutex_unlock(&dvbdmx->mutex);
970 return 0; 970 return 0;
971} 971}
972 972
@@ -977,11 +977,11 @@ static int dvbdmx_allocate_section_feed(struct dmx_demux *demux,
977 struct dvb_demux *dvbdmx = (struct dvb_demux *)demux; 977 struct dvb_demux *dvbdmx = (struct dvb_demux *)demux;
978 struct dvb_demux_feed *dvbdmxfeed; 978 struct dvb_demux_feed *dvbdmxfeed;
979 979
980 if (down_interruptible(&dvbdmx->mutex)) 980 if (mutex_lock_interruptible(&dvbdmx->mutex))
981 return -ERESTARTSYS; 981 return -ERESTARTSYS;
982 982
983 if (!(dvbdmxfeed = dvb_dmx_feed_alloc(dvbdmx))) { 983 if (!(dvbdmxfeed = dvb_dmx_feed_alloc(dvbdmx))) {
984 up(&dvbdmx->mutex); 984 mutex_unlock(&dvbdmx->mutex);
985 return -EBUSY; 985 return -EBUSY;
986 } 986 }
987 987
@@ -1006,7 +1006,7 @@ static int dvbdmx_allocate_section_feed(struct dmx_demux *demux,
1006 (*feed)->stop_filtering = dmx_section_feed_stop_filtering; 1006 (*feed)->stop_filtering = dmx_section_feed_stop_filtering;
1007 (*feed)->release_filter = dmx_section_feed_release_filter; 1007 (*feed)->release_filter = dmx_section_feed_release_filter;
1008 1008
1009 up(&dvbdmx->mutex); 1009 mutex_unlock(&dvbdmx->mutex);
1010 return 0; 1010 return 0;
1011} 1011}
1012 1012
@@ -1016,11 +1016,11 @@ static int dvbdmx_release_section_feed(struct dmx_demux *demux,
1016 struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed; 1016 struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed;
1017 struct dvb_demux *dvbdmx = (struct dvb_demux *)demux; 1017 struct dvb_demux *dvbdmx = (struct dvb_demux *)demux;
1018 1018
1019 if (down_interruptible(&dvbdmx->mutex)) 1019 if (mutex_lock_interruptible(&dvbdmx->mutex))
1020 return -ERESTARTSYS; 1020 return -ERESTARTSYS;
1021 1021
1022 if (dvbdmxfeed->state == DMX_STATE_FREE) { 1022 if (dvbdmxfeed->state == DMX_STATE_FREE) {
1023 up(&dvbdmx->mutex); 1023 mutex_unlock(&dvbdmx->mutex);
1024 return -EINVAL; 1024 return -EINVAL;
1025 } 1025 }
1026#ifndef NOBUFS 1026#ifndef NOBUFS
@@ -1033,7 +1033,7 @@ static int dvbdmx_release_section_feed(struct dmx_demux *demux,
1033 1033
1034 dvbdmxfeed->pid = 0xffff; 1034 dvbdmxfeed->pid = 0xffff;
1035 1035
1036 up(&dvbdmx->mutex); 1036 mutex_unlock(&dvbdmx->mutex);
1037 return 0; 1037 return 0;
1038} 1038}
1039 1039
@@ -1071,10 +1071,10 @@ static int dvbdmx_write(struct dmx_demux *demux, const char *buf, size_t count)
1071 if ((!demux->frontend) || (demux->frontend->source != DMX_MEMORY_FE)) 1071 if ((!demux->frontend) || (demux->frontend->source != DMX_MEMORY_FE))
1072 return -EINVAL; 1072 return -EINVAL;
1073 1073
1074 if (down_interruptible(&dvbdemux->mutex)) 1074 if (mutex_lock_interruptible(&dvbdemux->mutex))
1075 return -ERESTARTSYS; 1075 return -ERESTARTSYS;
1076 dvb_dmx_swfilter(dvbdemux, buf, count); 1076 dvb_dmx_swfilter(dvbdemux, buf, count);
1077 up(&dvbdemux->mutex); 1077 mutex_unlock(&dvbdemux->mutex);
1078 1078
1079 if (signal_pending(current)) 1079 if (signal_pending(current))
1080 return -EINTR; 1080 return -EINTR;
@@ -1126,11 +1126,11 @@ static int dvbdmx_connect_frontend(struct dmx_demux *demux,
1126 if (demux->frontend) 1126 if (demux->frontend)
1127 return -EINVAL; 1127 return -EINVAL;
1128 1128
1129 if (down_interruptible(&dvbdemux->mutex)) 1129 if (mutex_lock_interruptible(&dvbdemux->mutex))
1130 return -ERESTARTSYS; 1130 return -ERESTARTSYS;
1131 1131
1132 demux->frontend = frontend; 1132 demux->frontend = frontend;
1133 up(&dvbdemux->mutex); 1133 mutex_unlock(&dvbdemux->mutex);
1134 return 0; 1134 return 0;
1135} 1135}
1136 1136
@@ -1138,11 +1138,11 @@ static int dvbdmx_disconnect_frontend(struct dmx_demux *demux)
1138{ 1138{
1139 struct dvb_demux *dvbdemux = (struct dvb_demux *)demux; 1139 struct dvb_demux *dvbdemux = (struct dvb_demux *)demux;
1140 1140
1141 if (down_interruptible(&dvbdemux->mutex)) 1141 if (mutex_lock_interruptible(&dvbdemux->mutex))
1142 return -ERESTARTSYS; 1142 return -ERESTARTSYS;
1143 1143
1144 demux->frontend = NULL; 1144 demux->frontend = NULL;
1145 up(&dvbdemux->mutex); 1145 mutex_unlock(&dvbdemux->mutex);
1146 return 0; 1146 return 0;
1147} 1147}
1148 1148
@@ -1215,7 +1215,7 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux)
1215 dmx->disconnect_frontend = dvbdmx_disconnect_frontend; 1215 dmx->disconnect_frontend = dvbdmx_disconnect_frontend;
1216 dmx->get_pes_pids = dvbdmx_get_pes_pids; 1216 dmx->get_pes_pids = dvbdmx_get_pes_pids;
1217 1217
1218 sema_init(&dvbdemux->mutex, 1); 1218 mutex_init(&dvbdemux->mutex);
1219 spin_lock_init(&dvbdemux->lock); 1219 spin_lock_init(&dvbdemux->lock);
1220 1220
1221 return 0; 1221 return 0;
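Every call site in dvb_demux.c follows the same mechanical substitution: down_interruptible() becomes mutex_lock_interruptible(), up() becomes mutex_unlock(), and sema_init(..., 1) becomes mutex_init(). A minimal sketch of the recurring error-path idiom, with an illustrative function name:

static int demux_locked_op_sketch(struct dvb_demux *demux)
{
	if (mutex_lock_interruptible(&demux->mutex))	/* was down_interruptible() */
		return -ERESTARTSYS;

	if (!demux->start_feed) {
		mutex_unlock(&demux->mutex);		/* was up() */
		return -ENODEV;
	}

	/* ...critical section... */
	mutex_unlock(&demux->mutex);
	return 0;
}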
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
index 0cc888339d52..2c5f915329ca 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.h
+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
@@ -26,7 +26,7 @@
26#include <linux/time.h> 26#include <linux/time.h>
27#include <linux/timer.h> 27#include <linux/timer.h>
28#include <linux/spinlock.h> 28#include <linux/spinlock.h>
29#include <asm/semaphore.h> 29#include <linux/mutex.h>
30 30
31#include "demux.h" 31#include "demux.h"
32 32
@@ -125,7 +125,7 @@ struct dvb_demux {
125 u8 tsbuf[204]; 125 u8 tsbuf[204];
126 int tsbufp; 126 int tsbufp;
127 127
128 struct semaphore mutex; 128 struct mutex mutex;
129 spinlock_t lock; 129 spinlock_t lock;
130}; 130};
131 131
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index 771f32d889e6..2c3ea8f95dcd 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -37,7 +37,6 @@
37#include <linux/suspend.h> 37#include <linux/suspend.h>
38#include <linux/jiffies.h> 38#include <linux/jiffies.h>
39#include <asm/processor.h> 39#include <asm/processor.h>
40#include <asm/semaphore.h>
41 40
42#include "dvb_frontend.h" 41#include "dvb_frontend.h"
43#include "dvbdev.h" 42#include "dvbdev.h"
@@ -50,13 +49,13 @@ static int dvb_powerdown_on_sleep = 1;
50 49
51module_param_named(frontend_debug, dvb_frontend_debug, int, 0644); 50module_param_named(frontend_debug, dvb_frontend_debug, int, 0644);
52MODULE_PARM_DESC(frontend_debug, "Turn on/off frontend core debugging (default:off)."); 51MODULE_PARM_DESC(frontend_debug, "Turn on/off frontend core debugging (default:off).");
53module_param(dvb_shutdown_timeout, int, 0444); 52module_param(dvb_shutdown_timeout, int, 0644);
54MODULE_PARM_DESC(dvb_shutdown_timeout, "wait <shutdown_timeout> seconds after close() before suspending hardware"); 53MODULE_PARM_DESC(dvb_shutdown_timeout, "wait <shutdown_timeout> seconds after close() before suspending hardware");
55module_param(dvb_force_auto_inversion, int, 0444); 54module_param(dvb_force_auto_inversion, int, 0644);
56MODULE_PARM_DESC(dvb_force_auto_inversion, "0: normal (default), 1: INVERSION_AUTO forced always"); 55MODULE_PARM_DESC(dvb_force_auto_inversion, "0: normal (default), 1: INVERSION_AUTO forced always");
57module_param(dvb_override_tune_delay, int, 0444); 56module_param(dvb_override_tune_delay, int, 0644);
58MODULE_PARM_DESC(dvb_override_tune_delay, "0: normal (default), >0 => delay in milliseconds to wait for lock after a tune attempt"); 57MODULE_PARM_DESC(dvb_override_tune_delay, "0: normal (default), >0 => delay in milliseconds to wait for lock after a tune attempt");
59module_param(dvb_powerdown_on_sleep, int, 0444); 58module_param(dvb_powerdown_on_sleep, int, 0644);
60 MODULE_PARM_DESC(dvb_powerdown_on_sleep, "0: do not power down, 1: turn LNB voltage off on sleep (default)"); 59 MODULE_PARM_DESC(dvb_powerdown_on_sleep, "0: do not power down, 1: turn LNB voltage off on sleep (default)");
61 60
62#define dprintk if (dvb_frontend_debug) printk 61#define dprintk if (dvb_frontend_debug) printk
@@ -88,7 +87,7 @@ MODULE_PARM_DESC(dvb_powerdown_on_sleep, "0: do not power down, 1: turn LNB vola
88 * FESTATE_LOSTLOCK. When the lock has been lost, and we're searching it again. 87 * FESTATE_LOSTLOCK. When the lock has been lost, and we're searching it again.
89 */ 88 */
90 89
91static DECLARE_MUTEX(frontend_mutex); 90static DEFINE_MUTEX(frontend_mutex);
92 91
93struct dvb_frontend_private { 92struct dvb_frontend_private {
94 93
@@ -1021,12 +1020,12 @@ int dvb_register_frontend(struct dvb_adapter* dvb,
1021 1020
1022 dprintk ("%s\n", __FUNCTION__); 1021 dprintk ("%s\n", __FUNCTION__);
1023 1022
1024 if (down_interruptible (&frontend_mutex)) 1023 if (mutex_lock_interruptible(&frontend_mutex))
1025 return -ERESTARTSYS; 1024 return -ERESTARTSYS;
1026 1025
1027 fe->frontend_priv = kzalloc(sizeof(struct dvb_frontend_private), GFP_KERNEL); 1026 fe->frontend_priv = kzalloc(sizeof(struct dvb_frontend_private), GFP_KERNEL);
1028 if (fe->frontend_priv == NULL) { 1027 if (fe->frontend_priv == NULL) {
1029 up(&frontend_mutex); 1028 mutex_unlock(&frontend_mutex);
1030 return -ENOMEM; 1029 return -ENOMEM;
1031 } 1030 }
1032 fepriv = fe->frontend_priv; 1031 fepriv = fe->frontend_priv;
@@ -1045,7 +1044,7 @@ int dvb_register_frontend(struct dvb_adapter* dvb,
1045 dvb_register_device (fe->dvb, &fepriv->dvbdev, &dvbdev_template, 1044 dvb_register_device (fe->dvb, &fepriv->dvbdev, &dvbdev_template,
1046 fe, DVB_DEVICE_FRONTEND); 1045 fe, DVB_DEVICE_FRONTEND);
1047 1046
1048 up (&frontend_mutex); 1047 mutex_unlock(&frontend_mutex);
1049 return 0; 1048 return 0;
1050} 1049}
1051EXPORT_SYMBOL(dvb_register_frontend); 1050EXPORT_SYMBOL(dvb_register_frontend);
@@ -1055,7 +1054,7 @@ int dvb_unregister_frontend(struct dvb_frontend* fe)
1055 struct dvb_frontend_private *fepriv = fe->frontend_priv; 1054 struct dvb_frontend_private *fepriv = fe->frontend_priv;
1056 dprintk ("%s\n", __FUNCTION__); 1055 dprintk ("%s\n", __FUNCTION__);
1057 1056
1058 down (&frontend_mutex); 1057 mutex_lock(&frontend_mutex);
1059 dvb_unregister_device (fepriv->dvbdev); 1058 dvb_unregister_device (fepriv->dvbdev);
1060 dvb_frontend_stop (fe); 1059 dvb_frontend_stop (fe);
1061 if (fe->ops->release) 1060 if (fe->ops->release)
@@ -1064,7 +1063,7 @@ int dvb_unregister_frontend(struct dvb_frontend* fe)
1064 printk("dvb_frontend: Demodulator (%s) does not have a release callback!\n", fe->ops->info.name); 1063 printk("dvb_frontend: Demodulator (%s) does not have a release callback!\n", fe->ops->info.name);
1065 /* fe is invalid now */ 1064 /* fe is invalid now */
1066 kfree(fepriv); 1065 kfree(fepriv);
1067 up (&frontend_mutex); 1066 mutex_unlock(&frontend_mutex);
1068 return 0; 1067 return 0;
1069} 1068}
1070EXPORT_SYMBOL(dvb_unregister_frontend); 1069EXPORT_SYMBOL(dvb_unregister_frontend);
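For the statically allocated lock the patch swaps DECLARE_MUTEX(), which declared a semaphore, for DEFINE_MUTEX(). Registration keeps the interruptible variant so a signal can abort it, while teardown switches to a plain mutex_lock() since it is not expected to fail. A condensed sketch of the two styles:

#include <linux/mutex.h>

static DEFINE_MUTEX(frontend_mutex);	/* was: static DECLARE_MUTEX(frontend_mutex) */

static int register_path_sketch(void)
{
	if (mutex_lock_interruptible(&frontend_mutex))
		return -ERESTARTSYS;	/* caller may retry after a signal */
	/* ...allocate and register... */
	mutex_unlock(&frontend_mutex);
	return 0;
}

static void unregister_path_sketch(void)
{
	mutex_lock(&frontend_mutex);	/* teardown takes the lock unconditionally */
	/* ...unregister and free... */
	mutex_unlock(&frontend_mutex);
}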
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.h b/drivers/media/dvb/dvb-core/dvb_frontend.h
index 70a6d14efda7..d5aee5ad67a0 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.h
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.h
@@ -104,6 +104,7 @@ struct dvb_frontend {
104 struct dvb_adapter *dvb; 104 struct dvb_adapter *dvb;
105 void* demodulator_priv; 105 void* demodulator_priv;
106 void* frontend_priv; 106 void* frontend_priv;
107 void* misc_priv;
107}; 108};
108 109
109extern int dvb_register_frontend(struct dvb_adapter* dvb, 110extern int dvb_register_frontend(struct dvb_adapter* dvb,
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index 6711eb6a058c..2f0f35811bf7 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -62,6 +62,7 @@
62#include <linux/uio.h> 62#include <linux/uio.h>
63#include <asm/uaccess.h> 63#include <asm/uaccess.h>
64#include <linux/crc32.h> 64#include <linux/crc32.h>
65#include <linux/mutex.h>
65 66
66#include "dvb_demux.h" 67#include "dvb_demux.h"
67#include "dvb_net.h" 68#include "dvb_net.h"
@@ -151,8 +152,7 @@ struct dvb_net_priv {
151 unsigned char ule_bridged; /* Whether the ULE_BRIDGED extension header was found. */ 152 unsigned char ule_bridged; /* Whether the ULE_BRIDGED extension header was found. */
152 int ule_sndu_remain; /* Nr. of bytes still required for current ULE SNDU. */ 153 int ule_sndu_remain; /* Nr. of bytes still required for current ULE SNDU. */
153 unsigned long ts_count; /* Current ts cell counter. */ 154 unsigned long ts_count; /* Current ts cell counter. */
154 155 struct mutex mutex;
155 struct semaphore mutex;
156}; 156};
157 157
158 158
@@ -889,7 +889,7 @@ static int dvb_net_feed_start(struct net_device *dev)
889 unsigned char *mac = (unsigned char *) dev->dev_addr; 889 unsigned char *mac = (unsigned char *) dev->dev_addr;
890 890
891 dprintk("%s: rx_mode %i\n", __FUNCTION__, priv->rx_mode); 891 dprintk("%s: rx_mode %i\n", __FUNCTION__, priv->rx_mode);
892 down(&priv->mutex); 892 mutex_lock(&priv->mutex);
893 if (priv->tsfeed || priv->secfeed || priv->secfilter || priv->multi_secfilter[0]) 893 if (priv->tsfeed || priv->secfeed || priv->secfilter || priv->multi_secfilter[0])
894 printk("%s: BUG %d\n", __FUNCTION__, __LINE__); 894 printk("%s: BUG %d\n", __FUNCTION__, __LINE__);
895 895
@@ -974,7 +974,7 @@ static int dvb_net_feed_start(struct net_device *dev)
974 ret = -EINVAL; 974 ret = -EINVAL;
975 975
976error: 976error:
977 up(&priv->mutex); 977 mutex_unlock(&priv->mutex);
978 return ret; 978 return ret;
979} 979}
980 980
@@ -984,7 +984,7 @@ static int dvb_net_feed_stop(struct net_device *dev)
984 int i, ret = 0; 984 int i, ret = 0;
985 985
986 dprintk("%s\n", __FUNCTION__); 986 dprintk("%s\n", __FUNCTION__);
987 down(&priv->mutex); 987 mutex_lock(&priv->mutex);
988 if (priv->feedtype == DVB_NET_FEEDTYPE_MPE) { 988 if (priv->feedtype == DVB_NET_FEEDTYPE_MPE) {
989 if (priv->secfeed) { 989 if (priv->secfeed) {
990 if (priv->secfeed->is_filtering) { 990 if (priv->secfeed->is_filtering) {
@@ -1026,7 +1026,7 @@ static int dvb_net_feed_stop(struct net_device *dev)
1026 printk("%s: no ts feed to stop\n", dev->name); 1026 printk("%s: no ts feed to stop\n", dev->name);
1027 } else 1027 } else
1028 ret = -EINVAL; 1028 ret = -EINVAL;
1029 up(&priv->mutex); 1029 mutex_unlock(&priv->mutex);
1030 return ret; 1030 return ret;
1031} 1031}
1032 1032
@@ -1208,7 +1208,7 @@ static int dvb_net_add_if(struct dvb_net *dvbnet, u16 pid, u8 feedtype)
1208 1208
1209 INIT_WORK(&priv->set_multicast_list_wq, wq_set_multicast_list, net); 1209 INIT_WORK(&priv->set_multicast_list_wq, wq_set_multicast_list, net);
1210 INIT_WORK(&priv->restart_net_feed_wq, wq_restart_net_feed, net); 1210 INIT_WORK(&priv->restart_net_feed_wq, wq_restart_net_feed, net);
1211 init_MUTEX(&priv->mutex); 1211 mutex_init(&priv->mutex);
1212 1212
1213 net->base_addr = pid; 1213 net->base_addr = pid;
1214 1214
diff --git a/drivers/media/dvb/dvb-core/dvb_ringbuffer.c b/drivers/media/dvb/dvb-core/dvb_ringbuffer.c
index 77ad2410f4d3..c972fe014c58 100644
--- a/drivers/media/dvb/dvb-core/dvb_ringbuffer.c
+++ b/drivers/media/dvb/dvb-core/dvb_ringbuffer.c
@@ -45,6 +45,7 @@ void dvb_ringbuffer_init(struct dvb_ringbuffer *rbuf, void *data, size_t len)
45 rbuf->pread=rbuf->pwrite=0; 45 rbuf->pread=rbuf->pwrite=0;
46 rbuf->data=data; 46 rbuf->data=data;
47 rbuf->size=len; 47 rbuf->size=len;
48 rbuf->error=0;
48 49
49 init_waitqueue_head(&rbuf->queue); 50 init_waitqueue_head(&rbuf->queue);
50 51
@@ -87,6 +88,7 @@ ssize_t dvb_ringbuffer_avail(struct dvb_ringbuffer *rbuf)
87void dvb_ringbuffer_flush(struct dvb_ringbuffer *rbuf) 88void dvb_ringbuffer_flush(struct dvb_ringbuffer *rbuf)
88{ 89{
89 rbuf->pread = rbuf->pwrite; 90 rbuf->pread = rbuf->pwrite;
91 rbuf->error = 0;
90} 92}
91 93
92 94
diff --git a/drivers/media/dvb/dvb-core/dvb_ringbuffer.h b/drivers/media/dvb/dvb-core/dvb_ringbuffer.h
index 6d2560972771..d97714e75736 100644
--- a/drivers/media/dvb/dvb-core/dvb_ringbuffer.h
+++ b/drivers/media/dvb/dvb-core/dvb_ringbuffer.h
@@ -35,6 +35,7 @@ struct dvb_ringbuffer {
35 ssize_t size; 35 ssize_t size;
36 ssize_t pread; 36 ssize_t pread;
37 ssize_t pwrite; 37 ssize_t pwrite;
38 int error;
38 39
39 wait_queue_head_t queue; 40 wait_queue_head_t queue;
40 spinlock_t lock; 41 spinlock_t lock;
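The new error field gives producers a place to record an overrun (or other fault) that a later reader can observe; dvb_ringbuffer_flush() now resets it together with the read pointer. How the field is used is left to the caller; one plausible pattern, with illustrative helper names and errno choice:

/* Producer side: note an overflow instead of silently dropping data. */
static void ringbuf_put_sketch(struct dvb_ringbuffer *rbuf, const u8 *buf, size_t len)
{
	if (dvb_ringbuffer_free(rbuf) < (ssize_t)len)
		rbuf->error = -EOVERFLOW;
	else
		dvb_ringbuffer_write(rbuf, buf, len);
}

/* Consumer side: surface the pending error once, then start clean. */
static int ringbuf_check_sketch(struct dvb_ringbuffer *rbuf)
{
	int err = rbuf->error;

	if (err)
		dvb_ringbuffer_flush(rbuf);	/* also clears rbuf->error */
	return err;
}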
diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
index 162f9795cd89..e14bf43941e3 100644
--- a/drivers/media/dvb/dvb-usb/cxusb.c
+++ b/drivers/media/dvb/dvb-usb/cxusb.c
@@ -77,7 +77,7 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num)
77 struct dvb_usb_device *d = i2c_get_adapdata(adap); 77 struct dvb_usb_device *d = i2c_get_adapdata(adap);
78 int i; 78 int i;
79 79
80 if (down_interruptible(&d->i2c_sem) < 0) 80 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
81 return -EAGAIN; 81 return -EAGAIN;
82 82
83 if (num > 2) 83 if (num > 2)
@@ -126,7 +126,7 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num)
126 } 126 }
127 } 127 }
128 128
129 up(&d->i2c_sem); 129 mutex_unlock(&d->i2c_mutex);
130 return i; 130 return i;
131} 131}
132 132
diff --git a/drivers/media/dvb/dvb-usb/dibusb-common.c b/drivers/media/dvb/dvb-usb/dibusb-common.c
index 269d899da488..2d52b76671d3 100644
--- a/drivers/media/dvb/dvb-usb/dibusb-common.c
+++ b/drivers/media/dvb/dvb-usb/dibusb-common.c
@@ -128,7 +128,7 @@ static int dibusb_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num
128 struct dvb_usb_device *d = i2c_get_adapdata(adap); 128 struct dvb_usb_device *d = i2c_get_adapdata(adap);
129 int i; 129 int i;
130 130
131 if (down_interruptible(&d->i2c_sem) < 0) 131 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
132 return -EAGAIN; 132 return -EAGAIN;
133 133
134 if (num > 2) 134 if (num > 2)
@@ -146,7 +146,7 @@ static int dibusb_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num
146 break; 146 break;
147 } 147 }
148 148
149 up(&d->i2c_sem); 149 mutex_unlock(&d->i2c_mutex);
150 return i; 150 return i;
151} 151}
152 152
diff --git a/drivers/media/dvb/dvb-usb/digitv.c b/drivers/media/dvb/dvb-usb/digitv.c
index caa1346e3063..91136c00ce9d 100644
--- a/drivers/media/dvb/dvb-usb/digitv.c
+++ b/drivers/media/dvb/dvb-usb/digitv.c
@@ -48,7 +48,7 @@ static int digitv_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num
48 struct dvb_usb_device *d = i2c_get_adapdata(adap); 48 struct dvb_usb_device *d = i2c_get_adapdata(adap);
49 int i; 49 int i;
50 50
51 if (down_interruptible(&d->i2c_sem) < 0) 51 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
52 return -EAGAIN; 52 return -EAGAIN;
53 53
54 if (num > 2) 54 if (num > 2)
@@ -67,7 +67,7 @@ static int digitv_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num
67 break; 67 break;
68 } 68 }
69 69
70 up(&d->i2c_sem); 70 mutex_unlock(&d->i2c_mutex);
71 return i; 71 return i;
72} 72}
73 73
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-init.c b/drivers/media/dvb/dvb-usb/dvb-usb-init.c
index ce34a55e5c24..a1705ecb9a54 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-init.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-init.c
@@ -42,8 +42,8 @@ static int dvb_usb_init(struct dvb_usb_device *d)
42{ 42{
43 int ret = 0; 43 int ret = 0;
44 44
45 sema_init(&d->usb_sem, 1); 45 mutex_init(&d->usb_mutex);
46 sema_init(&d->i2c_sem, 1); 46 mutex_init(&d->i2c_mutex);
47 47
48 d->state = DVB_USB_STATE_INIT; 48 d->state = DVB_USB_STATE_INIT;
49 49
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-urb.c b/drivers/media/dvb/dvb-usb/dvb-usb-urb.c
index ee821974dc60..9002f35aa952 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-urb.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-urb.c
@@ -21,7 +21,7 @@ int dvb_usb_generic_rw(struct dvb_usb_device *d, u8 *wbuf, u16 wlen, u8 *rbuf,
21 if (wbuf == NULL || wlen == 0) 21 if (wbuf == NULL || wlen == 0)
22 return -EINVAL; 22 return -EINVAL;
23 23
24 if ((ret = down_interruptible(&d->usb_sem))) 24 if ((ret = mutex_lock_interruptible(&d->usb_mutex)))
25 return ret; 25 return ret;
26 26
27 deb_xfer(">>> "); 27 deb_xfer(">>> ");
@@ -53,7 +53,7 @@ int dvb_usb_generic_rw(struct dvb_usb_device *d, u8 *wbuf, u16 wlen, u8 *rbuf,
53 } 53 }
54 } 54 }
55 55
56 up(&d->usb_sem); 56 mutex_unlock(&d->usb_mutex);
57 return ret; 57 return ret;
58} 58}
59EXPORT_SYMBOL(dvb_usb_generic_rw); 59EXPORT_SYMBOL(dvb_usb_generic_rw);
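The usb_mutex exists because a generic read is really two control messages — the request and the answer — and nothing else may slip in between them. A condensed sketch of that pattern with hypothetical helper names (the real sequence lives in dvb_usb_generic_rw() above):

static int generic_rw_sketch(struct dvb_usb_device *d, u8 *wbuf, u16 wlen,
			     u8 *rbuf, u16 rlen)
{
	int ret;

	if ((ret = mutex_lock_interruptible(&d->usb_mutex)))
		return ret;

	ret = send_control_request(d, wbuf, wlen);		/* hypothetical helper */
	if (ret == 0 && rlen)
		ret = read_control_reply(d, rbuf, rlen);	/* hypothetical helper */

	mutex_unlock(&d->usb_mutex);	/* both halves done, release the device */
	return ret;
}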
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb.h b/drivers/media/dvb/dvb-usb/dvb-usb.h
index d4909e5c67e0..fead958a57e3 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb.h
@@ -12,6 +12,7 @@
12#include <linux/input.h> 12#include <linux/input.h>
13#include <linux/usb.h> 13#include <linux/usb.h>
14#include <linux/firmware.h> 14#include <linux/firmware.h>
15#include <linux/mutex.h>
15 16
16#include "dvb_frontend.h" 17#include "dvb_frontend.h"
17#include "dvb_demux.h" 18#include "dvb_demux.h"
@@ -227,8 +228,8 @@ struct dvb_usb_properties {
227 * @feedcount: number of requested feeds (used for streaming-activation) 228 * @feedcount: number of requested feeds (used for streaming-activation)
228 * @pid_filtering: is hardware pid_filtering used or not. 229 * @pid_filtering: is hardware pid_filtering used or not.
229 * 230 *
230 * @usb_sem: semaphore of USB control messages (reading needs two messages) 231 * @usb_mutex: semaphore of USB control messages (reading needs two messages)
231 * @i2c_sem: semaphore for i2c-transfers 232 * @i2c_mutex: semaphore for i2c-transfers
232 * 233 *
233 * @i2c_adap: device's i2c_adapter if it uses I2CoverUSB 234 * @i2c_adap: device's i2c_adapter if it uses I2CoverUSB
234 * @pll_addr: I2C address of the tuner for programming 235 * @pll_addr: I2C address of the tuner for programming
@@ -283,10 +284,10 @@ struct dvb_usb_device {
283 int pid_filtering; 284 int pid_filtering;
284 285
285 /* locking */ 286 /* locking */
286 struct semaphore usb_sem; 287 struct mutex usb_mutex;
287 288
288 /* i2c */ 289 /* i2c */
289 struct semaphore i2c_sem; 290 struct mutex i2c_mutex;
290 struct i2c_adapter i2c_adap; 291 struct i2c_adapter i2c_adap;
291 292
292 /* tuner programming information */ 293 /* tuner programming information */
diff --git a/drivers/media/dvb/dvb-usb/vp702x.c b/drivers/media/dvb/dvb-usb/vp702x.c
index 4a95eca81c5c..b2f098a2d5f7 100644
--- a/drivers/media/dvb/dvb-usb/vp702x.c
+++ b/drivers/media/dvb/dvb-usb/vp702x.c
@@ -75,7 +75,7 @@ int vp702x_usb_inout_op(struct dvb_usb_device *d, u8 *o, int olen, u8 *i, int il
75{ 75{
76 int ret; 76 int ret;
77 77
78 if ((ret = down_interruptible(&d->usb_sem))) 78 if ((ret = mutex_lock_interruptible(&d->usb_mutex)))
79 return ret; 79 return ret;
80 80
81 if ((ret = vp702x_usb_out_op(d,REQUEST_OUT,0,0,o,olen)) < 0) 81 if ((ret = vp702x_usb_out_op(d,REQUEST_OUT,0,0,o,olen)) < 0)
@@ -84,7 +84,7 @@ int vp702x_usb_inout_op(struct dvb_usb_device *d, u8 *o, int olen, u8 *i, int il
84 ret = vp702x_usb_in_op(d,REQUEST_IN,0,0,i,ilen); 84 ret = vp702x_usb_in_op(d,REQUEST_IN,0,0,i,ilen);
85 85
86unlock: 86unlock:
87 up(&d->usb_sem); 87 mutex_unlock(&d->usb_mutex);
88 88
89 return ret; 89 return ret;
90} 90}
diff --git a/drivers/media/dvb/dvb-usb/vp7045.c b/drivers/media/dvb/dvb-usb/vp7045.c
index 3835235b68df..8ea3834a6cf8 100644
--- a/drivers/media/dvb/dvb-usb/vp7045.c
+++ b/drivers/media/dvb/dvb-usb/vp7045.c
@@ -38,7 +38,7 @@ int vp7045_usb_op(struct dvb_usb_device *d, u8 cmd, u8 *out, int outlen, u8 *in,
38 deb_xfer("out buffer: "); 38 deb_xfer("out buffer: ");
39 debug_dump(outbuf,outlen+1,deb_xfer); 39 debug_dump(outbuf,outlen+1,deb_xfer);
40 40
41 if ((ret = down_interruptible(&d->usb_sem))) 41 if ((ret = mutex_lock_interruptible(&d->usb_mutex)))
42 return ret; 42 return ret;
43 43
44 if (usb_control_msg(d->udev, 44 if (usb_control_msg(d->udev,
@@ -68,7 +68,7 @@ int vp7045_usb_op(struct dvb_usb_device *d, u8 cmd, u8 *out, int outlen, u8 *in,
68 memcpy(in,&inbuf[1],inlen); 68 memcpy(in,&inbuf[1],inlen);
69 69
70unlock: 70unlock:
71 up(&d->usb_sem); 71 mutex_unlock(&d->usb_mutex);
72 72
73 return ret; 73 return ret;
74} 74}
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index c676b1e23ab0..94233168d241 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -116,6 +116,12 @@ config DVB_MT352
116 help 116 help
117 A DVB-T tuner module. Say Y when you want to support this frontend. 117 A DVB-T tuner module. Say Y when you want to support this frontend.
118 118
119config DVB_ZL10353
120 tristate "Zarlink ZL10353 based"
121 depends on DVB_CORE
122 help
123 A DVB-T tuner module. Say Y when you want to support this frontend.
124
119config DVB_DIB3000MB 125config DVB_DIB3000MB
120 tristate "DiBcom 3000M-B" 126 tristate "DiBcom 3000M-B"
121 depends on DVB_CORE 127 depends on DVB_CORE
@@ -155,7 +161,7 @@ comment "ATSC (North American/Korean Terresterial DTV) frontends"
155 depends on DVB_CORE 161 depends on DVB_CORE
156 162
157config DVB_NXT200X 163config DVB_NXT200X
158 tristate "Nextwave NXT2002/NXT2004 based" 164 tristate "NxtWave Communications NXT2002/NXT2004 based"
159 depends on DVB_CORE 165 depends on DVB_CORE
160 select FW_LOADER 166 select FW_LOADER
161 help 167 help
@@ -169,14 +175,14 @@ config DVB_NXT200X
169 or /lib/firmware (depending on configuration of firmware hotplug). 175 or /lib/firmware (depending on configuration of firmware hotplug).
170 176
171config DVB_OR51211 177config DVB_OR51211
172 tristate "or51211 based (pcHDTV HD2000 card)" 178 tristate "Oren OR51211 based"
173 depends on DVB_CORE 179 depends on DVB_CORE
174 select FW_LOADER 180 select FW_LOADER
175 help 181 help
176 An ATSC 8VSB tuner module. Say Y when you want to support this frontend. 182 An ATSC 8VSB tuner module. Say Y when you want to support this frontend.
177 183
178config DVB_OR51132 184config DVB_OR51132
179 tristate "OR51132 based (pcHDTV HD3000 card)" 185 tristate "Oren OR51132 based"
180 depends on DVB_CORE 186 depends on DVB_CORE
181 select FW_LOADER 187 select FW_LOADER
182 help 188 help
diff --git a/drivers/media/dvb/frontends/Makefile b/drivers/media/dvb/frontends/Makefile
index 1af769cd90c0..d09b6071fbaf 100644
--- a/drivers/media/dvb/frontends/Makefile
+++ b/drivers/media/dvb/frontends/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_DVB_TDA1004X) += tda1004x.o
20obj-$(CONFIG_DVB_SP887X) += sp887x.o 20obj-$(CONFIG_DVB_SP887X) += sp887x.o
21obj-$(CONFIG_DVB_NXT6000) += nxt6000.o 21obj-$(CONFIG_DVB_NXT6000) += nxt6000.o
22obj-$(CONFIG_DVB_MT352) += mt352.o 22obj-$(CONFIG_DVB_MT352) += mt352.o
23obj-$(CONFIG_DVB_ZL10353) += zl10353.o
23obj-$(CONFIG_DVB_CX22702) += cx22702.o 24obj-$(CONFIG_DVB_CX22702) += cx22702.o
24obj-$(CONFIG_DVB_TDA10021) += tda10021.o 25obj-$(CONFIG_DVB_TDA10021) += tda10021.o
25obj-$(CONFIG_DVB_STV0297) += stv0297.o 26obj-$(CONFIG_DVB_STV0297) += stv0297.o
diff --git a/drivers/media/dvb/frontends/bcm3510.c b/drivers/media/dvb/frontends/bcm3510.c
index caaee893ca76..1708a1d4893e 100644
--- a/drivers/media/dvb/frontends/bcm3510.c
+++ b/drivers/media/dvb/frontends/bcm3510.c
@@ -39,6 +39,7 @@
39#include <linux/jiffies.h> 39#include <linux/jiffies.h>
40#include <linux/string.h> 40#include <linux/string.h>
41#include <linux/slab.h> 41#include <linux/slab.h>
42#include <linux/mutex.h>
42 43
43#include "dvb_frontend.h" 44#include "dvb_frontend.h"
44#include "bcm3510.h" 45#include "bcm3510.h"
@@ -52,7 +53,7 @@ struct bcm3510_state {
52 struct dvb_frontend frontend; 53 struct dvb_frontend frontend;
53 54
54 /* demodulator private data */ 55 /* demodulator private data */
55 struct semaphore hab_sem; 56 struct mutex hab_mutex;
56 u8 firmware_loaded:1; 57 u8 firmware_loaded:1;
57 58
58 unsigned long next_status_check; 59 unsigned long next_status_check;
@@ -213,7 +214,7 @@ static int bcm3510_do_hab_cmd(struct bcm3510_state *st, u8 cmd, u8 msgid, u8 *ob
213 dbufout(ob,olen+2,deb_hab); 214 dbufout(ob,olen+2,deb_hab);
214 deb_hab("\n"); 215 deb_hab("\n");
215 216
216 if (down_interruptible(&st->hab_sem) < 0) 217 if (mutex_lock_interruptible(&st->hab_mutex) < 0)
217 return -EAGAIN; 218 return -EAGAIN;
218 219
219 if ((ret = bcm3510_hab_send_request(st, ob, olen+2)) < 0 || 220 if ((ret = bcm3510_hab_send_request(st, ob, olen+2)) < 0 ||
@@ -226,7 +227,7 @@ static int bcm3510_do_hab_cmd(struct bcm3510_state *st, u8 cmd, u8 msgid, u8 *ob
226 227
227 memcpy(ibuf,&ib[2],ilen); 228 memcpy(ibuf,&ib[2],ilen);
228error: 229error:
229 up(&st->hab_sem); 230 mutex_unlock(&st->hab_mutex);
230 return ret; 231 return ret;
231} 232}
232 233
@@ -796,7 +797,7 @@ struct dvb_frontend* bcm3510_attach(const struct bcm3510_config *config,
796 state->frontend.ops = &state->ops; 797 state->frontend.ops = &state->ops;
797 state->frontend.demodulator_priv = state; 798 state->frontend.demodulator_priv = state;
798 799
799 sema_init(&state->hab_sem, 1); 800 mutex_init(&state->hab_mutex);
800 801
801 if ((ret = bcm3510_readB(state,0xe0,&v)) < 0) 802 if ((ret = bcm3510_readB(state,0xe0,&v)) < 0)
802 goto error; 803 goto error;
diff --git a/drivers/media/dvb/frontends/bsbe1.h b/drivers/media/dvb/frontends/bsbe1.h
new file mode 100644
index 000000000000..78573b22ada9
--- /dev/null
+++ b/drivers/media/dvb/frontends/bsbe1.h
@@ -0,0 +1,123 @@
1/*
2 * bsbe1.h - ALPS BSBE1 tuner support (moved from av7110.c)
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
8 *
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
20 *
21 *
22 * the project's page is at http://www.linuxtv.org
23 */
24
25#ifndef BSBE1_H
26#define BSBE1_H
27
28static u8 alps_bsbe1_inittab[] = {
29 0x01, 0x15,
30 0x02, 0x30,
31 0x03, 0x00,
32 0x04, 0x7d, /* F22FR = 0x7d, F22 = f_VCO / 128 / 0x7d = 22 kHz */
33 0x05, 0x35, /* I2CT = 0, SCLT = 1, SDAT = 1 */
34 0x06, 0x40, /* DAC not used, set to high impedance mode */
35 0x07, 0x00, /* DAC LSB */
36 0x08, 0x40, /* DiSEqC off, LNB power on OP2/LOCK pin on */
37 0x09, 0x00, /* FIFO */
38 0x0c, 0x51, /* OP1 ctl = Normal, OP1 val = 1 (LNB Power ON) */
39 0x0d, 0x82, /* DC offset compensation = ON, beta_agc1 = 2 */
40 0x0e, 0x23, /* alpha_tmg = 2, beta_tmg = 3 */
41 0x10, 0x3f, // AGC2 0x3d
42 0x11, 0x84,
43 0x12, 0xb9,
44 0x15, 0xc9, // lock detector threshold
45 0x16, 0x00,
46 0x17, 0x00,
47 0x18, 0x00,
48 0x19, 0x00,
49 0x1a, 0x00,
50 0x1f, 0x50,
51 0x20, 0x00,
52 0x21, 0x00,
53 0x22, 0x00,
54 0x23, 0x00,
55 0x28, 0x00, // out imp: normal out type: parallel FEC mode:0
56 0x29, 0x1e, // 1/2 threshold
57 0x2a, 0x14, // 2/3 threshold
58 0x2b, 0x0f, // 3/4 threshold
59 0x2c, 0x09, // 5/6 threshold
60 0x2d, 0x05, // 7/8 threshold
61 0x2e, 0x01,
62 0x31, 0x1f, // test all FECs
63 0x32, 0x19, // viterbi and synchro search
64 0x33, 0xfc, // rs control
65 0x34, 0x93, // error control
66 0x0f, 0x92,
67 0xff, 0xff
68};
69
70
71static int alps_bsbe1_set_symbol_rate(struct dvb_frontend* fe, u32 srate, u32 ratio)
72{
73 u8 aclk = 0;
74 u8 bclk = 0;
75
76 if (srate < 1500000) { aclk = 0xb7; bclk = 0x47; }
77 else if (srate < 3000000) { aclk = 0xb7; bclk = 0x4b; }
78 else if (srate < 7000000) { aclk = 0xb7; bclk = 0x4f; }
79 else if (srate < 14000000) { aclk = 0xb7; bclk = 0x53; }
80 else if (srate < 30000000) { aclk = 0xb6; bclk = 0x53; }
81 else if (srate < 45000000) { aclk = 0xb4; bclk = 0x51; }
82
83 stv0299_writereg(fe, 0x13, aclk);
84 stv0299_writereg(fe, 0x14, bclk);
85 stv0299_writereg(fe, 0x1f, (ratio >> 16) & 0xff);
86 stv0299_writereg(fe, 0x20, (ratio >> 8) & 0xff);
87 stv0299_writereg(fe, 0x21, (ratio ) & 0xf0);
88
89 return 0;
90}
91
92static int alps_bsbe1_pll_set(struct dvb_frontend* fe, struct i2c_adapter *i2c, struct dvb_frontend_parameters* params)
93{
94 int ret;
95 u8 data[4];
96 u32 div;
97 struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = data, .len = sizeof(data) };
98
99 if ((params->frequency < 950000) || (params->frequency > 2150000))
100 return -EINVAL;
101
102 div = (params->frequency + (125 - 1)) / 125; // round correctly
103 data[0] = (div >> 8) & 0x7f;
104 data[1] = div & 0xff;
105 data[2] = 0x80 | ((div & 0x18000) >> 10) | 4;
106 data[3] = (params->frequency > 1530000) ? 0xE0 : 0xE4;
107
108 ret = i2c_transfer(i2c, &msg, 1);
109 return (ret != 1) ? -EIO : 0;
110}
111
112static struct stv0299_config alps_bsbe1_config = {
113 .demod_address = 0x68,
114 .inittab = alps_bsbe1_inittab,
115 .mclk = 88000000UL,
116 .invert = 1,
117 .skip_reinit = 0,
118 .min_delay_ms = 100,
119 .set_symbol_rate = alps_bsbe1_set_symbol_rate,
120 .pll_set = alps_bsbe1_pll_set,
121};
122
123#endif
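A card driver would typically hand this configuration, together with its own I2C adapter, to the STV0299 attach routine and then register the resulting frontend. A hedged usage sketch — the budget structure and member names are illustrative, and the attach signature is assumed from the stv0299 driver in this tree:

static int bsbe1_attach_sketch(struct budget *budget)
{
	struct dvb_frontend *fe;

	fe = stv0299_attach(&alps_bsbe1_config, &budget->i2c_adap);
	if (!fe)
		return -ENODEV;

	return dvb_register_frontend(&budget->dvb_adapter, fe);
}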
diff --git a/drivers/media/dvb/frontends/bsru6.h b/drivers/media/dvb/frontends/bsru6.h
new file mode 100644
index 000000000000..2a5366ce79cc
--- /dev/null
+++ b/drivers/media/dvb/frontends/bsru6.h
@@ -0,0 +1,140 @@
1/*
2 * bsru6.h - ALPS BSRU6 tuner support (moved from budget-ci.c)
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
8 *
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
20 *
21 *
22 * the project's page is at http://www.linuxtv.org
23 */
24
25#ifndef BSRU6_H
26#define BSRU6_H
27
28static u8 alps_bsru6_inittab[] = {
29 0x01, 0x15,
30 0x02, 0x00,
31 0x03, 0x00,
32 0x04, 0x7d, /* F22FR = 0x7d, F22 = f_VCO / 128 / 0x7d = 22 kHz */
33 0x05, 0x35, /* I2CT = 0, SCLT = 1, SDAT = 1 */
34 0x06, 0x40, /* DAC not used, set to high impedance mode */
35 0x07, 0x00, /* DAC LSB */
36 0x08, 0x40, /* DiSEqC off, LNB power on OP2/LOCK pin on */
37 0x09, 0x00, /* FIFO */
38 0x0c, 0x51, /* OP1 ctl = Normal, OP1 val = 1 (LNB Power ON) */
39 0x0d, 0x82, /* DC offset compensation = ON, beta_agc1 = 2 */
40 0x0e, 0x23, /* alpha_tmg = 2, beta_tmg = 3 */
41 0x10, 0x3f, // AGC2 0x3d
42 0x11, 0x84,
43 0x12, 0xb9,
44 0x15, 0xc9, // lock detector threshold
45 0x16, 0x00,
46 0x17, 0x00,
47 0x18, 0x00,
48 0x19, 0x00,
49 0x1a, 0x00,
50 0x1f, 0x50,
51 0x20, 0x00,
52 0x21, 0x00,
53 0x22, 0x00,
54 0x23, 0x00,
55 0x28, 0x00, // out imp: normal out type: parallel FEC mode:0
56 0x29, 0x1e, // 1/2 threshold
57 0x2a, 0x14, // 2/3 threshold
58 0x2b, 0x0f, // 3/4 threshold
59 0x2c, 0x09, // 5/6 threshold
60 0x2d, 0x05, // 7/8 threshold
61 0x2e, 0x01,
62 0x31, 0x1f, // test all FECs
63 0x32, 0x19, // viterbi and synchro search
64 0x33, 0xfc, // rs control
65 0x34, 0x93, // error control
66 0x0f, 0x52,
67 0xff, 0xff
68};
69
70static int alps_bsru6_set_symbol_rate(struct dvb_frontend *fe, u32 srate, u32 ratio)
71{
72 u8 aclk = 0;
73 u8 bclk = 0;
74
75 if (srate < 1500000) {
76 aclk = 0xb7;
77 bclk = 0x47;
78 } else if (srate < 3000000) {
79 aclk = 0xb7;
80 bclk = 0x4b;
81 } else if (srate < 7000000) {
82 aclk = 0xb7;
83 bclk = 0x4f;
84 } else if (srate < 14000000) {
85 aclk = 0xb7;
86 bclk = 0x53;
87 } else if (srate < 30000000) {
88 aclk = 0xb6;
89 bclk = 0x53;
90 } else if (srate < 45000000) {
91 aclk = 0xb4;
92 bclk = 0x51;
93 }
94
95 stv0299_writereg(fe, 0x13, aclk);
96 stv0299_writereg(fe, 0x14, bclk);
97 stv0299_writereg(fe, 0x1f, (ratio >> 16) & 0xff);
98 stv0299_writereg(fe, 0x20, (ratio >> 8) & 0xff);
99 stv0299_writereg(fe, 0x21, ratio & 0xf0);
100
101 return 0;
102}
103
104static int alps_bsru6_pll_set(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct dvb_frontend_parameters *params)
105{
106 u8 buf[4];
107 u32 div;
108 struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = buf, .len = sizeof(buf) };
109
110 if ((params->frequency < 950000) || (params->frequency > 2150000))
111 return -EINVAL;
112
113 div = (params->frequency + (125 - 1)) / 125; // round correctly
114 buf[0] = (div >> 8) & 0x7f;
115 buf[1] = div & 0xff;
116 buf[2] = 0x80 | ((div & 0x18000) >> 10) | 4;
117 buf[3] = 0xC4;
118
119 if (params->frequency > 1530000)
120 buf[3] = 0xc0;
121
122 if (i2c_transfer(i2c, &msg, 1) != 1)
123 return -EIO;
124 return 0;
125}
126
127static struct stv0299_config alps_bsru6_config = {
128 .demod_address = 0x68,
129 .inittab = alps_bsru6_inittab,
130 .mclk = 88000000UL,
131 .invert = 1,
132 .skip_reinit = 0,
133 .lock_output = STV0229_LOCKOUTPUT_1,
134 .volt13_op0_op1 = STV0299_VOLT13_OP1,
135 .min_delay_ms = 100,
136 .set_symbol_rate = alps_bsru6_set_symbol_rate,
137 .pll_set = alps_bsru6_pll_set,
138};
139
140#endif
diff --git a/drivers/media/dvb/frontends/cx24110.c b/drivers/media/dvb/frontends/cx24110.c
index d15d32c51dc5..f3edf8b517dd 100644
--- a/drivers/media/dvb/frontends/cx24110.c
+++ b/drivers/media/dvb/frontends/cx24110.c
@@ -371,6 +371,15 @@ static int cx24110_initfe(struct dvb_frontend* fe)
371 return 0; 371 return 0;
372} 372}
373 373
374static int cx24110_sleep(struct dvb_frontend *fe)
375{
376 struct cx24110_state *state = fe->demodulator_priv;
377
378 if (state->config->pll_sleep)
379 return state->config->pll_sleep(fe);
380 return 0;
381}
382
374static int cx24110_set_voltage (struct dvb_frontend* fe, fe_sec_voltage_t voltage) 383static int cx24110_set_voltage (struct dvb_frontend* fe, fe_sec_voltage_t voltage)
375{ 384{
376 struct cx24110_state *state = fe->demodulator_priv; 385 struct cx24110_state *state = fe->demodulator_priv;
@@ -418,6 +427,9 @@ static int cx24110_send_diseqc_msg(struct dvb_frontend* fe,
418 struct cx24110_state *state = fe->demodulator_priv; 427 struct cx24110_state *state = fe->demodulator_priv;
419 unsigned long timeout; 428 unsigned long timeout;
420 429
430 if (cmd->msg_len < 3 || cmd->msg_len > 6)
431 return -EINVAL; /* not implemented */
432
421 for (i = 0; i < cmd->msg_len; i++) 433 for (i = 0; i < cmd->msg_len; i++)
422 cx24110_writereg(state, 0x79 + i, cmd->msg[i]); 434 cx24110_writereg(state, 0x79 + i, cmd->msg[i]);
423 435
@@ -639,6 +651,7 @@ static struct dvb_frontend_ops cx24110_ops = {
639 .release = cx24110_release, 651 .release = cx24110_release,
640 652
641 .init = cx24110_initfe, 653 .init = cx24110_initfe,
654 .sleep = cx24110_sleep,
642 .set_frontend = cx24110_set_frontend, 655 .set_frontend = cx24110_set_frontend,
643 .get_frontend = cx24110_get_frontend, 656 .get_frontend = cx24110_get_frontend,
644 .read_status = cx24110_read_status, 657 .read_status = cx24110_read_status,
diff --git a/drivers/media/dvb/frontends/cx24110.h b/drivers/media/dvb/frontends/cx24110.h
index b63ecf26421a..609ac642b406 100644
--- a/drivers/media/dvb/frontends/cx24110.h
+++ b/drivers/media/dvb/frontends/cx24110.h
@@ -35,6 +35,7 @@ struct cx24110_config
35 /* PLL maintenance */ 35 /* PLL maintenance */
36 int (*pll_init)(struct dvb_frontend* fe); 36 int (*pll_init)(struct dvb_frontend* fe);
37 int (*pll_set)(struct dvb_frontend* fe, struct dvb_frontend_parameters* params); 37 int (*pll_set)(struct dvb_frontend* fe, struct dvb_frontend_parameters* params);
38 int (*pll_sleep)(struct dvb_frontend* fe);
38}; 39};
39 40
40extern struct dvb_frontend* cx24110_attach(const struct cx24110_config* config, 41extern struct dvb_frontend* cx24110_attach(const struct cx24110_config* config,
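With the new hook a board configuration can ask the driver to power down its PLL when the frontend sleeps; the callback is optional and only invoked when set, as the cx24110_sleep() hunk above shows. A hedged sketch of a board wiring it up (the I2C address and helper are illustrative):

static int myboard_pll_sleep(struct dvb_frontend *fe)
{
	/* write the tuner's power-down command over I2C here */
	return 0;
}

static struct cx24110_config myboard_cx24110_config = {
	.demod_address = 0x55,			/* illustrative I2C address */
	.pll_sleep     = myboard_pll_sleep,	/* new optional callback */
	/* .pll_init / .pll_set as before this patch */
};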
diff --git a/drivers/media/dvb/frontends/dvb-pll.c b/drivers/media/dvb/frontends/dvb-pll.c
index 4dcb6050d4fa..b6e2c387a04c 100644
--- a/drivers/media/dvb/frontends/dvb-pll.c
+++ b/drivers/media/dvb/frontends/dvb-pll.c
@@ -362,6 +362,63 @@ struct dvb_pll_desc dvb_pll_philips_sd1878_tda8261 = {
362}; 362};
363EXPORT_SYMBOL(dvb_pll_philips_sd1878_tda8261); 363EXPORT_SYMBOL(dvb_pll_philips_sd1878_tda8261);
364 364
365/*
366 * Philips TD1316 Tuner.
367 */
368static void td1316_bw(u8 *buf, u32 freq, int bandwidth)
369{
370 u8 band;
371
372 /* determine band */
373 if (freq < 161000000)
374 band = 1;
375 else if (freq < 444000000)
376 band = 2;
377 else
378 band = 4;
379
380 buf[3] |= band;
381
382 /* setup PLL filter */
383 if (bandwidth == BANDWIDTH_8_MHZ)
384 buf[3] |= 1 << 3;
385}
386
387struct dvb_pll_desc dvb_pll_philips_td1316 = {
388 .name = "Philips TD1316",
389 .min = 87000000,
390 .max = 895000000,
391 .setbw = td1316_bw,
392 .count = 9,
393 .entries = {
394 { 93834000, 36166000, 166666, 0xca, 0x60},
395 { 123834000, 36166000, 166666, 0xca, 0xa0},
396 { 163834000, 36166000, 166666, 0xca, 0xc0},
397 { 253834000, 36166000, 166666, 0xca, 0x60},
398 { 383834000, 36166000, 166666, 0xca, 0xa0},
399 { 443834000, 36166000, 166666, 0xca, 0xc0},
400 { 583834000, 36166000, 166666, 0xca, 0x60},
401 { 793834000, 36166000, 166666, 0xca, 0xa0},
402 { 858834000, 36166000, 166666, 0xca, 0xe0},
403 },
404};
405EXPORT_SYMBOL(dvb_pll_philips_td1316);
406
407/* FE6600 used on DViCO Hybrid */
408struct dvb_pll_desc dvb_pll_thomson_fe6600 = {
409 .name = "Thomson FE6600",
410 .min = 44250000,
411 .max = 858000000,
412 .count = 4,
413 .entries = {
414 { 250000000, 36213333, 166667, 0xb4, 0x12 },
415 { 455000000, 36213333, 166667, 0xfe, 0x11 },
416 { 775500000, 36213333, 166667, 0xbc, 0x18 },
417 { 999999999, 36213333, 166667, 0xf4, 0x18 },
418 }
419};
420EXPORT_SYMBOL(dvb_pll_thomson_fe6600);
421
365/* ----------------------------------------------------------- */ 422/* ----------------------------------------------------------- */
366/* code */ 423/* code */
367 424
@@ -391,8 +448,8 @@ int dvb_pll_configure(struct dvb_pll_desc *desc, u8 *buf,
391 div = (freq + desc->entries[i].offset) / desc->entries[i].stepsize; 448 div = (freq + desc->entries[i].offset) / desc->entries[i].stepsize;
392 buf[0] = div >> 8; 449 buf[0] = div >> 8;
393 buf[1] = div & 0xff; 450 buf[1] = div & 0xff;
394 buf[2] = desc->entries[i].cb1; 451 buf[2] = desc->entries[i].config;
395 buf[3] = desc->entries[i].cb2; 452 buf[3] = desc->entries[i].cb;
396 453
397 if (desc->setbw) 454 if (desc->setbw)
398 desc->setbw(buf, freq, bandwidth); 455 desc->setbw(buf, freq, bandwidth);
diff --git a/drivers/media/dvb/frontends/dvb-pll.h b/drivers/media/dvb/frontends/dvb-pll.h
index bb8d4b4eb183..2b8461784989 100644
--- a/drivers/media/dvb/frontends/dvb-pll.h
+++ b/drivers/media/dvb/frontends/dvb-pll.h
@@ -15,8 +15,8 @@ struct dvb_pll_desc {
15 u32 limit; 15 u32 limit;
16 u32 offset; 16 u32 offset;
17 u32 stepsize; 17 u32 stepsize;
18 u8 cb1; 18 u8 config;
19 u8 cb2; 19 u8 cb;
20 } entries[12]; 20 } entries[12];
21}; 21};
22 22
@@ -40,6 +40,9 @@ extern struct dvb_pll_desc dvb_pll_tuv1236d;
40extern struct dvb_pll_desc dvb_pll_tdhu2; 40extern struct dvb_pll_desc dvb_pll_tdhu2;
41extern struct dvb_pll_desc dvb_pll_samsung_tbmv; 41extern struct dvb_pll_desc dvb_pll_samsung_tbmv;
42extern struct dvb_pll_desc dvb_pll_philips_sd1878_tda8261; 42extern struct dvb_pll_desc dvb_pll_philips_sd1878_tda8261;
43extern struct dvb_pll_desc dvb_pll_philips_td1316;
44
45extern struct dvb_pll_desc dvb_pll_thomson_fe6600;
43 46
44int dvb_pll_configure(struct dvb_pll_desc *desc, u8 *buf, 47int dvb_pll_configure(struct dvb_pll_desc *desc, u8 *buf,
45 u32 freq, int bandwidth); 48 u32 freq, int bandwidth);
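The renamed per-entry bytes (config and cb, formerly cb1 and cb2) land in buf[2] and buf[3] of the four-byte tuner message that dvb_pll_configure() assembles. A hedged sketch of using it with the new TD1316 descriptor (the tuner's I2C address is illustrative):

static int tune_td1316_sketch(struct i2c_adapter *i2c, u32 freq_hz, int bandwidth)
{
	u8 buf[4];
	struct i2c_msg msg = { .addr = 0x61 /* illustrative */, .flags = 0,
			       .buf = buf, .len = sizeof(buf) };

	if (dvb_pll_configure(&dvb_pll_philips_td1316, buf, freq_hz, bandwidth) < 0)
		return -EINVAL;	/* frequency outside the descriptor's range */

	return (i2c_transfer(i2c, &msg, 1) == 1) ? 0 : -EIO;
}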
diff --git a/drivers/media/dvb/frontends/lnbp21.h b/drivers/media/dvb/frontends/lnbp21.h
new file mode 100644
index 000000000000..0dcbe61b61b1
--- /dev/null
+++ b/drivers/media/dvb/frontends/lnbp21.h
@@ -0,0 +1,139 @@
1/*
2 * lnbp21.h - driver for lnb supply and control ic lnbp21
3 *
4 * Copyright (C) 2006 Oliver Endriss
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 *
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
22 *
23 *
24 * the project's page is at http://www.linuxtv.org
25 */
26
27#ifndef _LNBP21_H
28#define _LNBP21_H
29
30/* system register */
31#define LNBP21_OLF 0x01
32#define LNBP21_OTF 0x02
33#define LNBP21_EN 0x04
34#define LNBP21_VSEL 0x08
35#define LNBP21_LLC 0x10
36#define LNBP21_TEN 0x20
37#define LNBP21_ISEL 0x40
38#define LNBP21_PCL 0x80
39
40struct lnbp21 {
41 u8 config;
42 u8 override_or;
43 u8 override_and;
44 struct i2c_adapter *i2c;
45 void (*release_chain)(struct dvb_frontend* fe);
46};
47
48static int lnbp21_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
49{
50 struct lnbp21 *lnbp21 = (struct lnbp21 *) fe->misc_priv;
51 struct i2c_msg msg = { .addr = 0x08, .flags = 0,
52 .buf = &lnbp21->config,
53 .len = sizeof(lnbp21->config) };
54
55 lnbp21->config &= ~(LNBP21_VSEL | LNBP21_EN);
56
57 switch(voltage) {
58 case SEC_VOLTAGE_OFF:
59 break;
60 case SEC_VOLTAGE_13:
61 lnbp21->config |= LNBP21_EN;
62 break;
63 case SEC_VOLTAGE_18:
64 lnbp21->config |= (LNBP21_EN | LNBP21_VSEL);
65 break;
66 default:
67 return -EINVAL;
68 };
69
70 lnbp21->config |= lnbp21->override_or;
71 lnbp21->config &= lnbp21->override_and;
72
73 return (i2c_transfer(lnbp21->i2c, &msg, 1) == 1) ? 0 : -EIO;
74}
75
76static int lnbp21_enable_high_lnb_voltage(struct dvb_frontend *fe, long arg)
77{
78 struct lnbp21 *lnbp21 = (struct lnbp21 *) fe->misc_priv;
79 struct i2c_msg msg = { .addr = 0x08, .flags = 0,
80 .buf = &lnbp21->config,
81 .len = sizeof(lnbp21->config) };
82
83 if (arg)
84 lnbp21->config |= LNBP21_LLC;
85 else
86 lnbp21->config &= ~LNBP21_LLC;
87
88 lnbp21->config |= lnbp21->override_or;
89 lnbp21->config &= lnbp21->override_and;
90
91 return (i2c_transfer(lnbp21->i2c, &msg, 1) == 1) ? 0 : -EIO;
92}
93
94static void lnbp21_exit(struct dvb_frontend *fe)
95{
96 struct lnbp21 *lnbp21 = (struct lnbp21 *) fe->misc_priv;
97
98 /* LNBP power off */
99 lnbp21_set_voltage(fe, SEC_VOLTAGE_OFF);
100
101 /* free data & call next release routine */
102 fe->ops->release = lnbp21->release_chain;
103 kfree(fe->misc_priv);
104 fe->misc_priv = NULL;
105 if (fe->ops->release)
106 fe->ops->release(fe);
107}
108
109static int lnbp21_init(struct dvb_frontend *fe, struct i2c_adapter *i2c, u8 override_set, u8 override_clear)
110{
111 struct lnbp21 *lnbp21 = kmalloc(sizeof(struct lnbp21), GFP_KERNEL);
112
113 if (!lnbp21)
114 return -ENOMEM;
115
116 /* default configuration */
117 lnbp21->config = LNBP21_ISEL;
118
119 /* bits which should be forced to '1' */
120 lnbp21->override_or = override_set;
121
122 /* bits which should be forced to '0' */
123 lnbp21->override_and = ~override_clear;
124
125 /* install release callback */
126 lnbp21->release_chain = fe->ops->release;
127 fe->ops->release = lnbp21_exit;
128
129 /* override frontend ops */
130 fe->ops->set_voltage = lnbp21_set_voltage;
131 fe->ops->enable_high_lnb_voltage = lnbp21_enable_high_lnb_voltage;
132
133 lnbp21->i2c = i2c;
134 fe->misc_priv = lnbp21;
135
136 return lnbp21_set_voltage(fe, SEC_VOLTAGE_OFF);
137}
138
139#endif
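For reference, a minimal sketch of how a card driver is expected to hook this helper in, mirroring the av7110 and budget-ci changes later in this patch; the function name and the 'fe'/'i2c' variables are illustrative only. Note the design choice: lnbp21_init() saves the frontend's original release op in release_chain and installs lnbp21_exit(), so the helper's state is freed and the original release handler restored automatically when the frontend is released.

/* Sketch only (assumed card-side code, not part of this patch).
 * Passing LNBP21_LLC forces long-line compensation on, as budget-ci does
 * below; av7110 passes 0, 0 to keep the chip defaults. */
static int example_hook_lnbp21(struct dvb_frontend *fe, struct i2c_adapter *i2c)
{
	if (lnbp21_init(fe, i2c, LNBP21_LLC, 0)) {
		printk("example: no LNBP21 found\n");
		if (fe->ops->release)
			fe->ops->release(fe);
		return -ENODEV;
	}
	/* fe->ops->set_voltage and fe->ops->enable_high_lnb_voltage
	 * now drive the LNBP21 */
	return 0;
}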
diff --git a/drivers/media/dvb/frontends/tda1004x.c b/drivers/media/dvb/frontends/tda1004x.c
index c63e9a5084eb..8e8df7b4ca0e 100644
--- a/drivers/media/dvb/frontends/tda1004x.c
+++ b/drivers/media/dvb/frontends/tda1004x.c
@@ -229,7 +229,7 @@ static int tda1004x_enable_tuner_i2c(struct tda1004x_state *state)
229 dprintk("%s\n", __FUNCTION__); 229 dprintk("%s\n", __FUNCTION__);
230 230
231 result = tda1004x_write_mask(state, TDA1004X_CONFC4, 2, 2); 231 result = tda1004x_write_mask(state, TDA1004X_CONFC4, 2, 2);
232 msleep(1); 232 msleep(20);
233 return result; 233 return result;
234} 234}
235 235
@@ -502,7 +502,12 @@ static int tda10046_fwupload(struct dvb_frontend* fe)
502 const struct firmware *fw; 502 const struct firmware *fw;
503 503
504 /* reset + wake up chip */ 504 /* reset + wake up chip */
505 tda1004x_write_byteI(state, TDA1004X_CONFC4, 0); 505 if (state->config->xtal_freq == TDA10046_XTAL_4M) {
506 tda1004x_write_byteI(state, TDA1004X_CONFC4, 0);
507 } else {
508 dprintk("%s: 16MHz Xtal, reducing I2C speed\n", __FUNCTION__);
509 tda1004x_write_byteI(state, TDA1004X_CONFC4, 0x80);
510 }
506 tda1004x_write_mask(state, TDA10046H_CONF_TRISTATE1, 1, 0); 511 tda1004x_write_mask(state, TDA10046H_CONF_TRISTATE1, 1, 0);
507 /* let the clocks recover from sleep */ 512 /* let the clocks recover from sleep */
508 msleep(5); 513 msleep(5);
@@ -651,7 +656,7 @@ static int tda10046_init(struct dvb_frontend* fe)
651 // tda setup 656 // tda setup
652 tda1004x_write_mask(state, TDA1004X_CONFC4, 0x20, 0); // disable DSP watchdog timer 657 tda1004x_write_mask(state, TDA1004X_CONFC4, 0x20, 0); // disable DSP watchdog timer
653 tda1004x_write_byteI(state, TDA1004X_AUTO, 0x87); // 100 ppm crystal, select HP stream 658 tda1004x_write_byteI(state, TDA1004X_AUTO, 0x87); // 100 ppm crystal, select HP stream
654 tda1004x_write_byteI(state, TDA1004X_CONFC1, 8); // disable pulse killer 659 tda1004x_write_byteI(state, TDA1004X_CONFC1, 0x88); // enable pulse killer
655 660
656 switch (state->config->agc_config) { 661 switch (state->config->agc_config) {
657 case TDA10046_AGC_DEFAULT: 662 case TDA10046_AGC_DEFAULT:
@@ -672,6 +677,12 @@ static int tda10046_init(struct dvb_frontend* fe)
672 tda1004x_write_byteI(state, TDA10046H_AGC_RENORM, 0x08); // Gain Renormalize 677 tda1004x_write_byteI(state, TDA10046H_AGC_RENORM, 0x08); // Gain Renormalize
673 tda1004x_write_byteI(state, TDA10046H_CONF_POLARITY, 0x6a); // set AGC polarities 678 tda1004x_write_byteI(state, TDA10046H_CONF_POLARITY, 0x6a); // set AGC polarities
674 break; 679 break;
680 case TDA10046_AGC_TDA827X_GPL:
681 tda1004x_write_byteI(state, TDA10046H_AGC_CONF, 0x02); // AGC setup
682 tda1004x_write_byteI(state, TDA10046H_AGC_THR, 0x70); // AGC Threshold
683 tda1004x_write_byteI(state, TDA10046H_AGC_RENORM, 0x08); // Gain Renormalize
684 tda1004x_write_byteI(state, TDA10046H_CONF_POLARITY, 0x60); // set AGC polarities
685 break;
675 } 686 }
676 tda1004x_write_byteI(state, TDA1004X_CONFADC2, 0x38); 687 tda1004x_write_byteI(state, TDA1004X_CONFADC2, 0x38);
677 tda1004x_write_byteI(state, TDA10046H_CONF_TRISTATE1, 0x61); // Turn both AGC outputs on 688 tda1004x_write_byteI(state, TDA10046H_CONF_TRISTATE1, 0x61); // Turn both AGC outputs on
@@ -683,6 +694,7 @@ static int tda10046_init(struct dvb_frontend* fe)
683 tda1004x_write_byteI(state, TDA10046H_CVBER_CTRL, 0x1a); // 10^6 VBER measurement bits 694 tda1004x_write_byteI(state, TDA10046H_CVBER_CTRL, 0x1a); // 10^6 VBER measurement bits
684 tda1004x_write_byteI(state, TDA1004X_CONF_TS1, 7); // MPEG2 interface config 695 tda1004x_write_byteI(state, TDA1004X_CONF_TS1, 7); // MPEG2 interface config
685 tda1004x_write_byteI(state, TDA1004X_CONF_TS2, 0xc0); // MPEG2 interface config 696 tda1004x_write_byteI(state, TDA1004X_CONF_TS2, 0xc0); // MPEG2 interface config
697 // tda1004x_write_mask(state, 0x50, 0x80, 0x80); // handle out of guard echoes
686 tda1004x_write_mask(state, 0x3a, 0x80, state->config->invert_oclk << 7); 698 tda1004x_write_mask(state, 0x3a, 0x80, state->config->invert_oclk << 7);
687 699
688 state->initialised = 1; 700 state->initialised = 1;
@@ -1027,6 +1039,7 @@ static int tda1004x_read_status(struct dvb_frontend* fe, fe_status_t * fe_status
1027 if (status == -1) 1039 if (status == -1)
1028 return -EIO; 1040 return -EIO;
1029 cber |= (status << 8); 1041 cber |= (status << 8);
1042 // The address 0x20 should be read to cope with a TDA10046 bug
1030 tda1004x_read_byte(state, TDA1004X_CBER_RESET); 1043 tda1004x_read_byte(state, TDA1004X_CBER_RESET);
1031 1044
1032 if (cber != 65535) 1045 if (cber != 65535)
@@ -1047,7 +1060,8 @@ static int tda1004x_read_status(struct dvb_frontend* fe, fe_status_t * fe_status
1047 status = tda1004x_read_byte(state, TDA1004X_VBER_MSB); 1060 status = tda1004x_read_byte(state, TDA1004X_VBER_MSB);
1048 if (status == -1) 1061 if (status == -1)
1049 return -EIO; 1062 return -EIO;
1050 vber |= ((status << 16) & 0x0f); 1063 vber |= (status & 0x0f) << 16;
1064 // The CVBER_LUT should be read to cope with TDA10046 hardware bug
1051 tda1004x_read_byte(state, TDA1004X_CVBER_LUT); 1065 tda1004x_read_byte(state, TDA1004X_CVBER_LUT);
1052 1066
1053 // if RS has passed some valid TS packets, then we must be 1067 // if RS has passed some valid TS packets, then we must be
@@ -1161,6 +1175,7 @@ static int tda1004x_read_ber(struct dvb_frontend* fe, u32* ber)
1161 if (tmp < 0) 1175 if (tmp < 0)
1162 return -EIO; 1176 return -EIO;
1163 *ber |= (tmp << 9); 1177 *ber |= (tmp << 9);
1178 // The address 0x20 should be read to cope with a TDA10046 bug
1164 tda1004x_read_byte(state, TDA1004X_CBER_RESET); 1179 tda1004x_read_byte(state, TDA1004X_CBER_RESET);
1165 1180
1166 dprintk("%s: ber=0x%x\n", __FUNCTION__, *ber); 1181 dprintk("%s: ber=0x%x\n", __FUNCTION__, *ber);
@@ -1187,6 +1202,8 @@ static int tda1004x_sleep(struct dvb_frontend* fe)
1187 tda1004x_disable_tuner_i2c(state); 1202 tda1004x_disable_tuner_i2c(state);
1188 } 1203 }
1189 } 1204 }
1205 /* set outputs to tristate */
1206 tda1004x_write_byteI(state, TDA10046H_CONF_TRISTATE1, 0xff);
1190 tda1004x_write_mask(state, TDA1004X_CONFC4, 1, 1); 1207 tda1004x_write_mask(state, TDA1004X_CONFC4, 1, 1);
1191 break; 1208 break;
1192 } 1209 }
diff --git a/drivers/media/dvb/frontends/tda1004x.h b/drivers/media/dvb/frontends/tda1004x.h
index 8659c52647ad..cc0c4af64067 100644
--- a/drivers/media/dvb/frontends/tda1004x.h
+++ b/drivers/media/dvb/frontends/tda1004x.h
@@ -35,7 +35,8 @@ enum tda10046_agc {
35 TDA10046_AGC_DEFAULT, /* original configuration */ 35 TDA10046_AGC_DEFAULT, /* original configuration */
36	TDA10046_AGC_IFO_AUTO_NEG,	/* IF AGC only, automatic, negative */	 36	TDA10046_AGC_IFO_AUTO_NEG,	/* IF AGC only, automatic, negative */
37 TDA10046_AGC_IFO_AUTO_POS, /* IF AGC only, automatic, positive */ 37 TDA10046_AGC_IFO_AUTO_POS, /* IF AGC only, automatic, positive */
38 TDA10046_AGC_TDA827X, /* IF AGC only, special setup for tda827x */ 38 TDA10046_AGC_TDA827X, /* IF AGC only, special setup for tda827x */
39 TDA10046_AGC_TDA827X_GPL, /* same as above, but GPIOs 0 */
39}; 40};
40 41
41enum tda10046_if { 42enum tda10046_if {
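The new enum value is selected from a board's tda1004x_config; a rough sketch follows. Only agc_config, xtal_freq and invert_oclk are fields this patch itself references — the struct initializer as a whole and the 0x08 demod address are assumptions for illustration.

/* Sketch only: board config choosing the GPIO-friendly tda827x AGC setup */
static struct tda1004x_config example_tda10046_config = {
	.demod_address = 0x08,			/* placeholder I2C address */
	.invert_oclk   = 0,
	.xtal_freq     = TDA10046_XTAL_4M,	/* 4 MHz crystal assumed */
	.agc_config    = TDA10046_AGC_TDA827X_GPL,
};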
diff --git a/drivers/media/dvb/frontends/zl10353.c b/drivers/media/dvb/frontends/zl10353.c
new file mode 100644
index 000000000000..d7d9f59d76d2
--- /dev/null
+++ b/drivers/media/dvb/frontends/zl10353.c
@@ -0,0 +1,311 @@
1/*
2 * Driver for Zarlink DVB-T ZL10353 demodulator
3 *
4 * Copyright (C) 2006 Christopher Pascoe <c.pascoe@itee.uq.edu.au>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 *
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/moduleparam.h>
25#include <linux/init.h>
26#include <linux/delay.h>
27#include <linux/string.h>
28#include <linux/slab.h>
29
30#include "dvb_frontend.h"
31#include "zl10353_priv.h"
32#include "zl10353.h"
33
34struct zl10353_state {
35 struct i2c_adapter *i2c;
36 struct dvb_frontend frontend;
37 struct dvb_frontend_ops ops;
38
39 struct zl10353_config config;
40};
41
42static int debug_regs = 0;
43
44static int zl10353_single_write(struct dvb_frontend *fe, u8 reg, u8 val)
45{
46 struct zl10353_state *state = fe->demodulator_priv;
47 u8 buf[2] = { reg, val };
48 struct i2c_msg msg = { .addr = state->config.demod_address, .flags = 0,
49 .buf = buf, .len = 2 };
50 int err = i2c_transfer(state->i2c, &msg, 1);
51 if (err != 1) {
52 printk("zl10353: write to reg %x failed (err = %d)!\n", reg, err);
53 return err;
54 }
55 return 0;
56}
57
58int zl10353_write(struct dvb_frontend *fe, u8 *ibuf, int ilen)
59{
60 int err, i;
61 for (i = 0; i < ilen - 1; i++)
62 if ((err = zl10353_single_write(fe, ibuf[0] + i, ibuf[i + 1])))
63 return err;
64
65 return 0;
66}
67
68static int zl10353_read_register(struct zl10353_state *state, u8 reg)
69{
70 int ret;
71 u8 b0[1] = { reg };
72 u8 b1[1] = { 0 };
73 struct i2c_msg msg[2] = { { .addr = state->config.demod_address,
74 .flags = 0,
75 .buf = b0, .len = 1 },
76 { .addr = state->config.demod_address,
77 .flags = I2C_M_RD,
78 .buf = b1, .len = 1 } };
79
80 ret = i2c_transfer(state->i2c, msg, 2);
81
82 if (ret != 2) {
83 printk("%s: readreg error (reg=%d, ret==%i)\n",
84 __FUNCTION__, reg, ret);
85 return ret;
86 }
87
88 return b1[0];
89}
90
91static void zl10353_dump_regs(struct dvb_frontend *fe)
92{
93 struct zl10353_state *state = fe->demodulator_priv;
94 char buf[52], buf2[4];
95 int ret;
96 u8 reg;
97
98 /* Dump all registers. */
99 for (reg = 0; ; reg++) {
100 if (reg % 16 == 0) {
101 if (reg)
102 printk(KERN_DEBUG "%s\n", buf);
103 sprintf(buf, "%02x: ", reg);
104 }
105 ret = zl10353_read_register(state, reg);
106 if (ret >= 0)
107 sprintf(buf2, "%02x ", (u8)ret);
108 else
109 strcpy(buf2, "-- ");
110 strcat(buf, buf2);
111 if (reg == 0xff)
112 break;
113 }
114 printk(KERN_DEBUG "%s\n", buf);
115}
116
117static int zl10353_sleep(struct dvb_frontend *fe)
118{
119 static u8 zl10353_softdown[] = { 0x50, 0x0C, 0x44 };
120
121 zl10353_write(fe, zl10353_softdown, sizeof(zl10353_softdown));
122 return 0;
123}
124
125static int zl10353_set_parameters(struct dvb_frontend *fe,
126 struct dvb_frontend_parameters *param)
127{
128 struct zl10353_state *state = fe->demodulator_priv;
129 u8 pllbuf[6] = { 0x67 };
130
131 /* These settings set "auto-everything" and start the FSM. */
132 zl10353_single_write(fe, 0x55, 0x80);
133 udelay(200);
134 zl10353_single_write(fe, 0xEA, 0x01);
135 udelay(200);
136 zl10353_single_write(fe, 0xEA, 0x00);
137
138 zl10353_single_write(fe, 0x56, 0x28);
139 zl10353_single_write(fe, 0x89, 0x20);
140 zl10353_single_write(fe, 0x5E, 0x00);
141 zl10353_single_write(fe, 0x65, 0x5A);
142 zl10353_single_write(fe, 0x66, 0xE9);
143 zl10353_single_write(fe, 0x62, 0x0A);
144
145 state->config.pll_set(fe, param, pllbuf + 1);
146 zl10353_write(fe, pllbuf, sizeof(pllbuf));
147
148 zl10353_single_write(fe, 0x70, 0x01);
149 udelay(250);
150 zl10353_single_write(fe, 0xE4, 0x00);
151 zl10353_single_write(fe, 0xE5, 0x2A);
152 zl10353_single_write(fe, 0xE9, 0x02);
153 zl10353_single_write(fe, 0xE7, 0x40);
154 zl10353_single_write(fe, 0xE8, 0x10);
155
156 return 0;
157}
158
159static int zl10353_read_status(struct dvb_frontend *fe, fe_status_t *status)
160{
161 struct zl10353_state *state = fe->demodulator_priv;
162 int s6, s7, s8;
163
164 if ((s6 = zl10353_read_register(state, STATUS_6)) < 0)
165 return -EREMOTEIO;
166 if ((s7 = zl10353_read_register(state, STATUS_7)) < 0)
167 return -EREMOTEIO;
168 if ((s8 = zl10353_read_register(state, STATUS_8)) < 0)
169 return -EREMOTEIO;
170
171 *status = 0;
172 if (s6 & (1 << 2))
173 *status |= FE_HAS_CARRIER;
174 if (s6 & (1 << 1))
175 *status |= FE_HAS_VITERBI;
176 if (s6 & (1 << 5))
177 *status |= FE_HAS_LOCK;
178 if (s7 & (1 << 4))
179 *status |= FE_HAS_SYNC;
180 if (s8 & (1 << 6))
181 *status |= FE_HAS_SIGNAL;
182
183 if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) !=
184 (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC))
185 *status &= ~FE_HAS_LOCK;
186
187 return 0;
188}
189
190static int zl10353_read_snr(struct dvb_frontend *fe, u16 *snr)
191{
192 struct zl10353_state *state = fe->demodulator_priv;
193 u8 _snr;
194
195 if (debug_regs)
196 zl10353_dump_regs(fe);
197
198 _snr = zl10353_read_register(state, SNR);
199 *snr = (_snr << 8) | _snr;
200
201 return 0;
202}
203
204static int zl10353_get_tune_settings(struct dvb_frontend *fe,
205 struct dvb_frontend_tune_settings
206 *fe_tune_settings)
207{
208 fe_tune_settings->min_delay_ms = 1000;
209 fe_tune_settings->step_size = 0;
210 fe_tune_settings->max_drift = 0;
211
212 return 0;
213}
214
215static int zl10353_init(struct dvb_frontend *fe)
216{
217 struct zl10353_state *state = fe->demodulator_priv;
218 u8 zl10353_reset_attach[6] = { 0x50, 0x03, 0x64, 0x46, 0x15, 0x0F };
219 int rc = 0;
220
221 if (debug_regs)
222 zl10353_dump_regs(fe);
223
224 /* Do a "hard" reset if not already done */
225 if (zl10353_read_register(state, 0x50) != 0x03) {
226 rc = zl10353_write(fe, zl10353_reset_attach,
227 sizeof(zl10353_reset_attach));
228 if (debug_regs)
229 zl10353_dump_regs(fe);
230 }
231
232 return 0;
233}
234
235static void zl10353_release(struct dvb_frontend *fe)
236{
237 struct zl10353_state *state = fe->demodulator_priv;
238
239 kfree(state);
240}
241
242static struct dvb_frontend_ops zl10353_ops;
243
244struct dvb_frontend *zl10353_attach(const struct zl10353_config *config,
245 struct i2c_adapter *i2c)
246{
247 struct zl10353_state *state = NULL;
248
249 /* allocate memory for the internal state */
250 state = kzalloc(sizeof(struct zl10353_state), GFP_KERNEL);
251 if (state == NULL)
252 goto error;
253
254 /* setup the state */
255 state->i2c = i2c;
256 memcpy(&state->config, config, sizeof(struct zl10353_config));
257 memcpy(&state->ops, &zl10353_ops, sizeof(struct dvb_frontend_ops));
258
259 /* check if the demod is there */
260 if (zl10353_read_register(state, CHIP_ID) != ID_ZL10353)
261 goto error;
262
263 /* create dvb_frontend */
264 state->frontend.ops = &state->ops;
265 state->frontend.demodulator_priv = state;
266
267 return &state->frontend;
268error:
269 kfree(state);
270 return NULL;
271}
272
273static struct dvb_frontend_ops zl10353_ops = {
274
275 .info = {
276 .name = "Zarlink ZL10353 DVB-T",
277 .type = FE_OFDM,
278 .frequency_min = 174000000,
279 .frequency_max = 862000000,
280 .frequency_stepsize = 166667,
281 .frequency_tolerance = 0,
282 .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 |
283 FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 |
284 FE_CAN_FEC_AUTO |
285 FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
286 FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO |
287 FE_CAN_HIERARCHY_AUTO | FE_CAN_RECOVER |
288 FE_CAN_MUTE_TS
289 },
290
291 .release = zl10353_release,
292
293 .init = zl10353_init,
294 .sleep = zl10353_sleep,
295
296 .set_frontend = zl10353_set_parameters,
297 .get_tune_settings = zl10353_get_tune_settings,
298
299 .read_status = zl10353_read_status,
300 .read_snr = zl10353_read_snr,
301};
302
303module_param(debug_regs, int, 0644);
304MODULE_PARM_DESC(debug_regs, "Turn on/off frontend register dumps (default:off).");
305
306MODULE_DESCRIPTION("Zarlink ZL10353 DVB-T demodulator driver");
307MODULE_AUTHOR("Chris Pascoe");
308MODULE_LICENSE("GPL");
309
310EXPORT_SYMBOL(zl10353_attach);
311EXPORT_SYMBOL(zl10353_write);
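Worth spelling out the buffer convention: zl10353_write() treats ibuf[0] as the starting register and writes the remaining bytes to consecutive registers through zl10353_single_write(). A one-line sketch (the array name is invented; the values are the soft-shutdown sequence from zl10353_sleep() above):

/* writes 0x0C to register 0x50 and 0x44 to register 0x51 */
static u8 example_seq[] = { 0x50, 0x0C, 0x44 };
/* zl10353_write(fe, example_seq, sizeof(example_seq)); */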
diff --git a/drivers/media/dvb/frontends/zl10353.h b/drivers/media/dvb/frontends/zl10353.h
new file mode 100644
index 000000000000..5cc4ae718d8c
--- /dev/null
+++ b/drivers/media/dvb/frontends/zl10353.h
@@ -0,0 +1,43 @@
1/*
2 * Driver for Zarlink DVB-T ZL10353 demodulator
3 *
4 * Copyright (C) 2006 Christopher Pascoe <c.pascoe@itee.uq.edu.au>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 *
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#ifndef ZL10353_H
23#define ZL10353_H
24
25#include <linux/dvb/frontend.h>
26
27struct zl10353_config
28{
29 /* demodulator's I2C address */
30 u8 demod_address;
31
32 /* function which configures the PLL buffer (for secondary I2C
33 * connected tuner) or tunes the PLL (for direct connected tuner) */
34 int (*pll_set)(struct dvb_frontend *fe,
35 struct dvb_frontend_parameters *params, u8 *pllbuf);
36};
37
38extern struct dvb_frontend* zl10353_attach(const struct zl10353_config *config,
39 struct i2c_adapter *i2c);
40
41extern int zl10353_write(struct dvb_frontend *fe, u8 *ibuf, int ilen);
42
43#endif /* ZL10353_H */
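To show how this header is meant to be used, a minimal attach sketch; the 0x0f demod address, the example_* names and the card pointer are assumptions, not part of this patch. pll_set fills the five bytes that zl10353_set_parameters() then writes to registers 0x67–0x6b to program the tuner.

/* Sketch only (assumed card-side code) */
static int example_pll_set(struct dvb_frontend *fe,
			   struct dvb_frontend_parameters *params, u8 *pllbuf)
{
	/* fill pllbuf[0..4] with whatever the board's tuner expects,
	 * derived from params->frequency */
	return 0;
}

static struct zl10353_config example_zl10353_config = {
	.demod_address = 0x0f,		/* placeholder */
	.pll_set       = example_pll_set,
};

/* in the card's frontend_init():
 *	fe = zl10353_attach(&example_zl10353_config, &card->i2c_adap);
 *	if (fe == NULL)
 *		... demod not detected ...
 */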
diff --git a/drivers/media/dvb/frontends/zl10353_priv.h b/drivers/media/dvb/frontends/zl10353_priv.h
new file mode 100644
index 000000000000..b72224bd7dde
--- /dev/null
+++ b/drivers/media/dvb/frontends/zl10353_priv.h
@@ -0,0 +1,42 @@
1/*
2 * Driver for Zarlink DVB-T ZL10353 demodulator
3 *
4 * Copyright (C) 2006 Christopher Pascoe <c.pascoe@itee.uq.edu.au>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 *
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#ifndef _ZL10353_PRIV_
23#define _ZL10353_PRIV_
24
25#define ID_ZL10353 0x14
26
27enum zl10353_reg_addr {
28 INTERRUPT_0 = 0x00,
29 INTERRUPT_1 = 0x01,
30 INTERRUPT_2 = 0x02,
31 INTERRUPT_3 = 0x03,
32 INTERRUPT_4 = 0x04,
33 INTERRUPT_5 = 0x05,
34 STATUS_6 = 0x06,
35 STATUS_7 = 0x07,
36 STATUS_8 = 0x08,
37 STATUS_9 = 0x09,
38 SNR = 0x10,
39 CHIP_ID = 0x7F,
40};
41
42#endif /* _ZL10353_PRIV_ */
diff --git a/drivers/media/dvb/ttpci/av7110.c b/drivers/media/dvb/ttpci/av7110.c
index 7c6ccb96b157..840efec32cb6 100644
--- a/drivers/media/dvb/ttpci/av7110.c
+++ b/drivers/media/dvb/ttpci/av7110.c
@@ -54,7 +54,6 @@
54#include <linux/i2c.h> 54#include <linux/i2c.h>
55 55
56#include <asm/system.h> 56#include <asm/system.h>
57#include <asm/semaphore.h>
58 57
59#include <linux/dvb/frontend.h> 58#include <linux/dvb/frontend.h>
60 59
@@ -67,6 +66,10 @@
67#include "av7110_ca.h" 66#include "av7110_ca.h"
68#include "av7110_ipack.h" 67#include "av7110_ipack.h"
69 68
69#include "bsbe1.h"
70#include "lnbp21.h"
71#include "bsru6.h"
72
70#define TS_WIDTH 376 73#define TS_WIDTH 376
71#define TS_HEIGHT 512 74#define TS_HEIGHT 512
72#define TS_BUFLEN (TS_WIDTH*TS_HEIGHT) 75#define TS_BUFLEN (TS_WIDTH*TS_HEIGHT)
@@ -82,6 +85,8 @@ static int hw_sections;
82static int rgb_on; 85static int rgb_on;
83static int volume = 255; 86static int volume = 255;
84static int budgetpatch; 87static int budgetpatch;
88static int wss_cfg_4_3 = 0x4008;
89static int wss_cfg_16_9 = 0x0007;
85 90
86module_param_named(debug, av7110_debug, int, 0644); 91module_param_named(debug, av7110_debug, int, 0644);
87MODULE_PARM_DESC(debug, "debug level (bitmask, default 0)"); 92MODULE_PARM_DESC(debug, "debug level (bitmask, default 0)");
@@ -100,6 +105,10 @@ module_param(volume, int, 0444);
100MODULE_PARM_DESC(volume, "initial volume: default 255 (range 0-255)"); 105MODULE_PARM_DESC(volume, "initial volume: default 255 (range 0-255)");
101module_param(budgetpatch, int, 0444); 106module_param(budgetpatch, int, 0444);
102MODULE_PARM_DESC(budgetpatch, "use budget-patch hardware modification: default 0 (0 no, 1 autodetect, 2 always)"); 107MODULE_PARM_DESC(budgetpatch, "use budget-patch hardware modification: default 0 (0 no, 1 autodetect, 2 always)");
108module_param(wss_cfg_4_3, int, 0444);
109MODULE_PARM_DESC(wss_cfg_4_3, "WSS 4:3 - default 0x4008 - bit 15: disable, 14: burst mode, 13..0: wss data");
110module_param(wss_cfg_16_9, int, 0444);
111MODULE_PARM_DESC(wss_cfg_16_9, "WSS 16:9 - default 0x0007 - bit 15: disable, 14: burst mode, 13..0: wss data");
103 112
104static void restart_feeds(struct av7110 *av7110); 113static void restart_feeds(struct av7110 *av7110);
105 114
@@ -125,6 +134,13 @@ static void init_av7110_av(struct av7110 *av7110)
125 if (ret < 0) 134 if (ret < 0)
126 printk("dvb-ttpci:cannot set internal volume to maximum:%d\n",ret); 135 printk("dvb-ttpci:cannot set internal volume to maximum:%d\n",ret);
127 136
137 ret = av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetWSSConfig, 2, 2, wss_cfg_4_3);
138 if (ret < 0)
139 printk("dvb-ttpci: unable to configure 4:3 wss\n");
140 ret = av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetWSSConfig, 2, 3, wss_cfg_16_9);
141 if (ret < 0)
142 printk("dvb-ttpci: unable to configure 16:9 wss\n");
143
128 ret = av7710_set_video_mode(av7110, vidmode); 144 ret = av7710_set_video_mode(av7110, vidmode);
129 if (ret < 0) 145 if (ret < 0)
130 printk("dvb-ttpci:cannot set video mode:%d\n",ret); 146 printk("dvb-ttpci:cannot set video mode:%d\n",ret);
@@ -242,10 +258,10 @@ static int arm_thread(void *data)
242 if (!av7110->arm_ready) 258 if (!av7110->arm_ready)
243 continue; 259 continue;
244 260
245 if (down_interruptible(&av7110->dcomlock)) 261 if (mutex_lock_interruptible(&av7110->dcomlock))
246 break; 262 break;
247 newloops = rdebi(av7110, DEBINOSWAP, STATUS_LOOPS, 0, 2); 263 newloops = rdebi(av7110, DEBINOSWAP, STATUS_LOOPS, 0, 2);
248 up(&av7110->dcomlock); 264 mutex_unlock(&av7110->dcomlock);
249 265
250 if (newloops == av7110->arm_loops || av7110->arm_errors > 3) { 266 if (newloops == av7110->arm_loops || av7110->arm_errors > 3) {
251 printk(KERN_ERR "dvb-ttpci: ARM crashed @ card %d\n", 267 printk(KERN_ERR "dvb-ttpci: ARM crashed @ card %d\n",
@@ -253,10 +269,10 @@ static int arm_thread(void *data)
253 269
254 recover_arm(av7110); 270 recover_arm(av7110);
255 271
256 if (down_interruptible(&av7110->dcomlock)) 272 if (mutex_lock_interruptible(&av7110->dcomlock))
257 break; 273 break;
258 newloops = rdebi(av7110, DEBINOSWAP, STATUS_LOOPS, 0, 2) - 1; 274 newloops = rdebi(av7110, DEBINOSWAP, STATUS_LOOPS, 0, 2) - 1;
259 up(&av7110->dcomlock); 275 mutex_unlock(&av7110->dcomlock);
260 } 276 }
261 av7110->arm_loops = newloops; 277 av7110->arm_loops = newloops;
262 av7110->arm_errors = 0; 278 av7110->arm_errors = 0;
@@ -741,7 +757,7 @@ int ChangePIDs(struct av7110 *av7110, u16 vpid, u16 apid, u16 ttpid,
741 int ret = 0; 757 int ret = 0;
742 dprintk(4, "%p\n", av7110); 758 dprintk(4, "%p\n", av7110);
743 759
744 if (down_interruptible(&av7110->pid_mutex)) 760 if (mutex_lock_interruptible(&av7110->pid_mutex))
745 return -ERESTARTSYS; 761 return -ERESTARTSYS;
746 762
747 if (!(vpid & 0x8000)) 763 if (!(vpid & 0x8000))
@@ -760,7 +776,7 @@ int ChangePIDs(struct av7110 *av7110, u16 vpid, u16 apid, u16 ttpid,
760 ret = SetPIDs(av7110, vpid, apid, ttpid, subpid, pcrpid); 776 ret = SetPIDs(av7110, vpid, apid, ttpid, subpid, pcrpid);
761 } 777 }
762 778
763 up(&av7110->pid_mutex); 779 mutex_unlock(&av7110->pid_mutex);
764 return ret; 780 return ret;
765} 781}
766 782
@@ -1088,11 +1104,9 @@ static int dvb_get_stc(struct dmx_demux *demux, unsigned int num,
1088 struct av7110 *av7110; 1104 struct av7110 *av7110;
1089 1105
1090 /* pointer casting paranoia... */ 1106 /* pointer casting paranoia... */
1091 if (!demux) 1107 BUG_ON(!demux);
1092 BUG();
1093 dvbdemux = (struct dvb_demux *) demux->priv; 1108 dvbdemux = (struct dvb_demux *) demux->priv;
1094 if (!dvbdemux) 1109 BUG_ON(!dvbdemux);
1095 BUG();
1096 av7110 = (struct av7110 *) dvbdemux->priv; 1110 av7110 = (struct av7110 *) dvbdemux->priv;
1097 1111
1098 dprintk(4, "%p\n", av7110); 1112 dprintk(4, "%p\n", av7110);
@@ -1570,208 +1584,6 @@ static struct ves1x93_config alps_bsrv2_config = {
1570 .pll_set = alps_bsrv2_pll_set, 1584 .pll_set = alps_bsrv2_pll_set,
1571}; 1585};
1572 1586
1573
1574static u8 alps_bsru6_inittab[] = {
1575 0x01, 0x15,
1576 0x02, 0x30,
1577 0x03, 0x00,
1578 0x04, 0x7d, /* F22FR = 0x7d, F22 = f_VCO / 128 / 0x7d = 22 kHz */
1579 0x05, 0x35, /* I2CT = 0, SCLT = 1, SDAT = 1 */
1580 0x06, 0x40, /* DAC not used, set to high impendance mode */
1581 0x07, 0x00, /* DAC LSB */
1582 0x08, 0x40, /* DiSEqC off, LNB power on OP2/LOCK pin on */
1583 0x09, 0x00, /* FIFO */
1584 0x0c, 0x51, /* OP1 ctl = Normal, OP1 val = 1 (LNB Power ON) */
1585 0x0d, 0x82, /* DC offset compensation = ON, beta_agc1 = 2 */
1586 0x0e, 0x23, /* alpha_tmg = 2, beta_tmg = 3 */
1587 0x10, 0x3f, // AGC2 0x3d
1588 0x11, 0x84,
1589 0x12, 0xb9,
1590 0x15, 0xc9, // lock detector threshold
1591 0x16, 0x00,
1592 0x17, 0x00,
1593 0x18, 0x00,
1594 0x19, 0x00,
1595 0x1a, 0x00,
1596 0x1f, 0x50,
1597 0x20, 0x00,
1598 0x21, 0x00,
1599 0x22, 0x00,
1600 0x23, 0x00,
1601 0x28, 0x00, // out imp: normal out type: parallel FEC mode:0
1602 0x29, 0x1e, // 1/2 threshold
1603 0x2a, 0x14, // 2/3 threshold
1604 0x2b, 0x0f, // 3/4 threshold
1605 0x2c, 0x09, // 5/6 threshold
1606 0x2d, 0x05, // 7/8 threshold
1607 0x2e, 0x01,
1608 0x31, 0x1f, // test all FECs
1609 0x32, 0x19, // viterbi and synchro search
1610 0x33, 0xfc, // rs control
1611 0x34, 0x93, // error control
1612 0x0f, 0x52,
1613 0xff, 0xff
1614};
1615
1616static int alps_bsru6_set_symbol_rate(struct dvb_frontend* fe, u32 srate, u32 ratio)
1617{
1618 u8 aclk = 0;
1619 u8 bclk = 0;
1620
1621 if (srate < 1500000) { aclk = 0xb7; bclk = 0x47; }
1622 else if (srate < 3000000) { aclk = 0xb7; bclk = 0x4b; }
1623 else if (srate < 7000000) { aclk = 0xb7; bclk = 0x4f; }
1624 else if (srate < 14000000) { aclk = 0xb7; bclk = 0x53; }
1625 else if (srate < 30000000) { aclk = 0xb6; bclk = 0x53; }
1626 else if (srate < 45000000) { aclk = 0xb4; bclk = 0x51; }
1627
1628 stv0299_writereg(fe, 0x13, aclk);
1629 stv0299_writereg(fe, 0x14, bclk);
1630 stv0299_writereg(fe, 0x1f, (ratio >> 16) & 0xff);
1631 stv0299_writereg(fe, 0x20, (ratio >> 8) & 0xff);
1632 stv0299_writereg(fe, 0x21, (ratio ) & 0xf0);
1633
1634 return 0;
1635}
1636
1637static int alps_bsru6_pll_set(struct dvb_frontend* fe, struct i2c_adapter *i2c, struct dvb_frontend_parameters* params)
1638{
1639 int ret;
1640 u8 data[4];
1641 u32 div;
1642 struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = data, .len = sizeof(data) };
1643
1644 if ((params->frequency < 950000) || (params->frequency > 2150000))
1645 return -EINVAL;
1646
1647 div = (params->frequency + (125 - 1)) / 125; // round correctly
1648 data[0] = (div >> 8) & 0x7f;
1649 data[1] = div & 0xff;
1650 data[2] = 0x80 | ((div & 0x18000) >> 10) | 4;
1651 data[3] = 0xC4;
1652
1653 if (params->frequency > 1530000) data[3] = 0xc0;
1654
1655 ret = i2c_transfer(i2c, &msg, 1);
1656 if (ret != 1)
1657 return -EIO;
1658 return 0;
1659}
1660
1661static struct stv0299_config alps_bsru6_config = {
1662
1663 .demod_address = 0x68,
1664 .inittab = alps_bsru6_inittab,
1665 .mclk = 88000000UL,
1666 .invert = 1,
1667 .skip_reinit = 0,
1668 .lock_output = STV0229_LOCKOUTPUT_1,
1669 .volt13_op0_op1 = STV0299_VOLT13_OP1,
1670 .min_delay_ms = 100,
1671 .set_symbol_rate = alps_bsru6_set_symbol_rate,
1672 .pll_set = alps_bsru6_pll_set,
1673};
1674
1675
1676static u8 alps_bsbe1_inittab[] = {
1677 0x01, 0x15,
1678 0x02, 0x30,
1679 0x03, 0x00,
1680 0x04, 0x7d, /* F22FR = 0x7d, F22 = f_VCO / 128 / 0x7d = 22 kHz */
1681 0x05, 0x35, /* I2CT = 0, SCLT = 1, SDAT = 1 */
1682 0x06, 0x40, /* DAC not used, set to high impendance mode */
1683 0x07, 0x00, /* DAC LSB */
1684 0x08, 0x40, /* DiSEqC off, LNB power on OP2/LOCK pin on */
1685 0x09, 0x00, /* FIFO */
1686 0x0c, 0x51, /* OP1 ctl = Normal, OP1 val = 1 (LNB Power ON) */
1687 0x0d, 0x82, /* DC offset compensation = ON, beta_agc1 = 2 */
1688 0x0e, 0x23, /* alpha_tmg = 2, beta_tmg = 3 */
1689 0x10, 0x3f, // AGC2 0x3d
1690 0x11, 0x84,
1691 0x12, 0xb9,
1692 0x15, 0xc9, // lock detector threshold
1693 0x16, 0x00,
1694 0x17, 0x00,
1695 0x18, 0x00,
1696 0x19, 0x00,
1697 0x1a, 0x00,
1698 0x1f, 0x50,
1699 0x20, 0x00,
1700 0x21, 0x00,
1701 0x22, 0x00,
1702 0x23, 0x00,
1703 0x28, 0x00, // out imp: normal out type: parallel FEC mode:0
1704 0x29, 0x1e, // 1/2 threshold
1705 0x2a, 0x14, // 2/3 threshold
1706 0x2b, 0x0f, // 3/4 threshold
1707 0x2c, 0x09, // 5/6 threshold
1708 0x2d, 0x05, // 7/8 threshold
1709 0x2e, 0x01,
1710 0x31, 0x1f, // test all FECs
1711 0x32, 0x19, // viterbi and synchro search
1712 0x33, 0xfc, // rs control
1713 0x34, 0x93, // error control
1714 0x0f, 0x92,
1715 0xff, 0xff
1716};
1717
1718static int alps_bsbe1_pll_set(struct dvb_frontend* fe, struct i2c_adapter *i2c, struct dvb_frontend_parameters* params)
1719{
1720 int ret;
1721 u8 data[4];
1722 u32 div;
1723 struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = data, .len = sizeof(data) };
1724
1725 if ((params->frequency < 950000) || (params->frequency > 2150000))
1726 return -EINVAL;
1727
1728 div = (params->frequency + (125 - 1)) / 125; // round correctly
1729 data[0] = (div >> 8) & 0x7f;
1730 data[1] = div & 0xff;
1731 data[2] = 0x80 | ((div & 0x18000) >> 10) | 4;
1732 data[3] = (params->frequency > 1530000) ? 0xE0 : 0xE4;
1733
1734 ret = i2c_transfer(i2c, &msg, 1);
1735 return (ret != 1) ? -EIO : 0;
1736}
1737
1738static struct stv0299_config alps_bsbe1_config = {
1739 .demod_address = 0x68,
1740 .inittab = alps_bsbe1_inittab,
1741 .mclk = 88000000UL,
1742 .invert = 1,
1743 .skip_reinit = 0,
1744 .min_delay_ms = 100,
1745 .set_symbol_rate = alps_bsru6_set_symbol_rate,
1746 .pll_set = alps_bsbe1_pll_set,
1747};
1748
1749static int lnbp21_set_voltage(struct dvb_frontend* fe, fe_sec_voltage_t voltage)
1750{
1751 struct av7110* av7110 = (struct av7110*) fe->dvb->priv;
1752 int ret;
1753 u8 data[1];
1754 struct i2c_msg msg = { .addr = 0x08, .flags = 0, .buf = data, .len = sizeof(data) };
1755
1756 switch(voltage) {
1757 case SEC_VOLTAGE_OFF:
1758 data[0] = 0x00;
1759 break;
1760 case SEC_VOLTAGE_13:
1761 data[0] = 0x44;
1762 break;
1763 case SEC_VOLTAGE_18:
1764 data[0] = 0x4c;
1765 break;
1766 default:
1767 return -EINVAL;
1768 };
1769
1770 ret = i2c_transfer(&av7110->i2c_adap, &msg, 1);
1771 return (ret != 1) ? -EIO : 0;
1772}
1773
1774
1775static int alps_tdbe2_pll_set(struct dvb_frontend* fe, struct dvb_frontend_parameters* params) 1587static int alps_tdbe2_pll_set(struct dvb_frontend* fe, struct dvb_frontend_parameters* params)
1776{ 1588{
1777 struct av7110* av7110 = fe->dvb->priv; 1589 struct av7110* av7110 = fe->dvb->priv;
@@ -2096,7 +1908,7 @@ static int av7110_fe_lock_fix(struct av7110* av7110, fe_status_t status)
2096 if (av7110->playing) 1908 if (av7110->playing)
2097 return 0; 1909 return 0;
2098 1910
2099 if (down_interruptible(&av7110->pid_mutex)) 1911 if (mutex_lock_interruptible(&av7110->pid_mutex))
2100 return -ERESTARTSYS; 1912 return -ERESTARTSYS;
2101 1913
2102 if (synced) { 1914 if (synced) {
@@ -2118,7 +1930,7 @@ static int av7110_fe_lock_fix(struct av7110* av7110, fe_status_t status)
2118 if (!ret) 1930 if (!ret)
2119 av7110->fe_synced = synced; 1931 av7110->fe_synced = synced;
2120 1932
2121 up(&av7110->pid_mutex); 1933 mutex_unlock(&av7110->pid_mutex);
2122 return ret; 1934 return ret;
2123} 1935}
2124 1936
@@ -2374,9 +2186,15 @@ static int frontend_init(struct av7110 *av7110)
2374 /* ALPS BSBE1 */ 2186 /* ALPS BSBE1 */
2375 av7110->fe = stv0299_attach(&alps_bsbe1_config, &av7110->i2c_adap); 2187 av7110->fe = stv0299_attach(&alps_bsbe1_config, &av7110->i2c_adap);
2376 if (av7110->fe) { 2188 if (av7110->fe) {
2377 av7110->fe->ops->set_voltage = lnbp21_set_voltage; 2189 if (lnbp21_init(av7110->fe, &av7110->i2c_adap, 0, 0)) {
2378 av7110->fe->ops->dishnetwork_send_legacy_command = NULL; 2190 printk("dvb-ttpci: LNBP21 not found!\n");
2379 av7110->recover = dvb_s_recover; 2191 if (av7110->fe->ops->release)
2192 av7110->fe->ops->release(av7110->fe);
2193 av7110->fe = NULL;
2194 } else {
2195 av7110->fe->ops->dishnetwork_send_legacy_command = NULL;
2196 av7110->recover = dvb_s_recover;
2197 }
2380 } 2198 }
2381 break; 2199 break;
2382 } 2200 }
@@ -2714,16 +2532,16 @@ static int __devinit av7110_attach(struct saa7146_dev* dev,
2714 tasklet_init (&av7110->debi_tasklet, debiirq, (unsigned long) av7110); 2532 tasklet_init (&av7110->debi_tasklet, debiirq, (unsigned long) av7110);
2715 tasklet_init (&av7110->gpio_tasklet, gpioirq, (unsigned long) av7110); 2533 tasklet_init (&av7110->gpio_tasklet, gpioirq, (unsigned long) av7110);
2716 2534
2717 sema_init(&av7110->pid_mutex, 1); 2535 mutex_init(&av7110->pid_mutex);
2718 2536
2719 /* locks for data transfers from/to AV7110 */ 2537 /* locks for data transfers from/to AV7110 */
2720 spin_lock_init(&av7110->debilock); 2538 spin_lock_init(&av7110->debilock);
2721 sema_init(&av7110->dcomlock, 1); 2539 mutex_init(&av7110->dcomlock);
2722 av7110->debitype = -1; 2540 av7110->debitype = -1;
2723 2541
2724 /* default OSD window */ 2542 /* default OSD window */
2725 av7110->osdwin = 1; 2543 av7110->osdwin = 1;
2726 sema_init(&av7110->osd_sema, 1); 2544 mutex_init(&av7110->osd_mutex);
2727 2545
2728 /* ARM "watchdog" */ 2546 /* ARM "watchdog" */
2729 init_waitqueue_head(&av7110->arm_wait); 2547 init_waitqueue_head(&av7110->arm_wait);
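The locking changes in this file, and in av7110.h and av7110_hw.c below, all apply the same semaphore-to-mutex conversion; a condensed sketch of the mapping, shown on a hypothetical lock:

/* before (binary semaphore)              after (struct mutex)
 *
 *	struct semaphore lock;            struct mutex lock;
 *	sema_init(&lock, 1);              mutex_init(&lock);
 *	down_interruptible(&lock);        mutex_lock_interruptible(&lock);
 *	up(&lock);                        mutex_unlock(&lock);
 */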
diff --git a/drivers/media/dvb/ttpci/av7110.h b/drivers/media/dvb/ttpci/av7110.h
index fafd25fab835..3e2e12124bae 100644
--- a/drivers/media/dvb/ttpci/av7110.h
+++ b/drivers/media/dvb/ttpci/av7110.h
@@ -16,6 +16,7 @@
16#include <linux/dvb/ca.h> 16#include <linux/dvb/ca.h>
17#include <linux/dvb/osd.h> 17#include <linux/dvb/osd.h>
18#include <linux/dvb/net.h> 18#include <linux/dvb/net.h>
19#include <linux/mutex.h>
19 20
20#include "dvbdev.h" 21#include "dvbdev.h"
21#include "demux.h" 22#include "demux.h"
@@ -127,7 +128,7 @@ struct av7110 {
127 /* DEBI and polled command interface */ 128 /* DEBI and polled command interface */
128 129
129 spinlock_t debilock; 130 spinlock_t debilock;
130 struct semaphore dcomlock; 131 struct mutex dcomlock;
131 volatile int debitype; 132 volatile int debitype;
132 volatile int debilen; 133 volatile int debilen;
133 134
@@ -146,7 +147,7 @@ struct av7110 {
146 147
147 int osdwin; /* currently active window */ 148 int osdwin; /* currently active window */
148 u16 osdbpp[8]; 149 u16 osdbpp[8];
149 struct semaphore osd_sema; 150 struct mutex osd_mutex;
150 151
151 /* CA */ 152 /* CA */
152 153
@@ -172,7 +173,7 @@ struct av7110 {
172 struct tasklet_struct vpe_tasklet; 173 struct tasklet_struct vpe_tasklet;
173 174
174 int fe_synced; 175 int fe_synced;
175 struct semaphore pid_mutex; 176 struct mutex pid_mutex;
176 177
177 int video_blank; 178 int video_blank;
178 struct video_status videostate; 179 struct video_status videostate;
diff --git a/drivers/media/dvb/ttpci/av7110_hw.c b/drivers/media/dvb/ttpci/av7110_hw.c
index 0bb6e74ae7f0..75736f2fe838 100644
--- a/drivers/media/dvb/ttpci/av7110_hw.c
+++ b/drivers/media/dvb/ttpci/av7110_hw.c
@@ -327,10 +327,10 @@ int av7110_wait_msgstate(struct av7110 *av7110, u16 flags)
327 start = jiffies; 327 start = jiffies;
328 for (;;) { 328 for (;;) {
329 err = time_after(jiffies, start + ARM_WAIT_FREE); 329 err = time_after(jiffies, start + ARM_WAIT_FREE);
330 if (down_interruptible(&av7110->dcomlock)) 330 if (mutex_lock_interruptible(&av7110->dcomlock))
331 return -ERESTARTSYS; 331 return -ERESTARTSYS;
332 stat = rdebi(av7110, DEBINOSWAP, MSGSTATE, 0, 2); 332 stat = rdebi(av7110, DEBINOSWAP, MSGSTATE, 0, 2);
333 up(&av7110->dcomlock); 333 mutex_unlock(&av7110->dcomlock);
334 if ((stat & flags) == 0) 334 if ((stat & flags) == 0)
335 break; 335 break;
336 if (err) { 336 if (err) {
@@ -487,11 +487,11 @@ static int av7110_send_fw_cmd(struct av7110 *av7110, u16* buf, int length)
487 dprintk(1, "arm not ready.\n"); 487 dprintk(1, "arm not ready.\n");
488 return -1; 488 return -1;
489 } 489 }
490 if (down_interruptible(&av7110->dcomlock)) 490 if (mutex_lock_interruptible(&av7110->dcomlock))
491 return -ERESTARTSYS; 491 return -ERESTARTSYS;
492 492
493 ret = __av7110_send_fw_cmd(av7110, buf, length); 493 ret = __av7110_send_fw_cmd(av7110, buf, length);
494 up(&av7110->dcomlock); 494 mutex_unlock(&av7110->dcomlock);
495 if (ret && ret!=-ERESTARTSYS) 495 if (ret && ret!=-ERESTARTSYS)
496 printk(KERN_ERR "dvb-ttpci: %s(): av7110_send_fw_cmd error %d\n", 496 printk(KERN_ERR "dvb-ttpci: %s(): av7110_send_fw_cmd error %d\n",
497 __FUNCTION__, ret); 497 __FUNCTION__, ret);
@@ -563,11 +563,11 @@ int av7110_fw_request(struct av7110 *av7110, u16 *request_buf,
563 return -1; 563 return -1;
564 } 564 }
565 565
566 if (down_interruptible(&av7110->dcomlock)) 566 if (mutex_lock_interruptible(&av7110->dcomlock))
567 return -ERESTARTSYS; 567 return -ERESTARTSYS;
568 568
569 if ((err = __av7110_send_fw_cmd(av7110, request_buf, request_buf_len)) < 0) { 569 if ((err = __av7110_send_fw_cmd(av7110, request_buf, request_buf_len)) < 0) {
570 up(&av7110->dcomlock); 570 mutex_unlock(&av7110->dcomlock);
571 printk(KERN_ERR "dvb-ttpci: av7110_fw_request error %d\n", err); 571 printk(KERN_ERR "dvb-ttpci: av7110_fw_request error %d\n", err);
572 return err; 572 return err;
573 } 573 }
@@ -579,7 +579,7 @@ int av7110_fw_request(struct av7110 *av7110, u16 *request_buf,
579 break; 579 break;
580 if (err) { 580 if (err) {
581 printk(KERN_ERR "%s: timeout waiting for COMMAND to complete\n", __FUNCTION__); 581 printk(KERN_ERR "%s: timeout waiting for COMMAND to complete\n", __FUNCTION__);
582 up(&av7110->dcomlock); 582 mutex_unlock(&av7110->dcomlock);
583 return -ETIMEDOUT; 583 return -ETIMEDOUT;
584 } 584 }
585#ifdef _NOHANDSHAKE 585#ifdef _NOHANDSHAKE
@@ -595,7 +595,7 @@ int av7110_fw_request(struct av7110 *av7110, u16 *request_buf,
595 break; 595 break;
596 if (err) { 596 if (err) {
597 printk(KERN_ERR "%s: timeout waiting for HANDSHAKE_REG\n", __FUNCTION__); 597 printk(KERN_ERR "%s: timeout waiting for HANDSHAKE_REG\n", __FUNCTION__);
598 up(&av7110->dcomlock); 598 mutex_unlock(&av7110->dcomlock);
599 return -ETIMEDOUT; 599 return -ETIMEDOUT;
600 } 600 }
601 msleep(1); 601 msleep(1);
@@ -606,12 +606,12 @@ int av7110_fw_request(struct av7110 *av7110, u16 *request_buf,
606 stat = rdebi(av7110, DEBINOSWAP, MSGSTATE, 0, 2); 606 stat = rdebi(av7110, DEBINOSWAP, MSGSTATE, 0, 2);
607 if (stat & GPMQOver) { 607 if (stat & GPMQOver) {
608 printk(KERN_ERR "%s: GPMQOver\n", __FUNCTION__); 608 printk(KERN_ERR "%s: GPMQOver\n", __FUNCTION__);
609 up(&av7110->dcomlock); 609 mutex_unlock(&av7110->dcomlock);
610 return -1; 610 return -1;
611 } 611 }
612 else if (stat & OSDQOver) { 612 else if (stat & OSDQOver) {
613 printk(KERN_ERR "%s: OSDQOver\n", __FUNCTION__); 613 printk(KERN_ERR "%s: OSDQOver\n", __FUNCTION__);
614 up(&av7110->dcomlock); 614 mutex_unlock(&av7110->dcomlock);
615 return -1; 615 return -1;
616 } 616 }
617#endif 617#endif
@@ -619,7 +619,7 @@ int av7110_fw_request(struct av7110 *av7110, u16 *request_buf,
619 for (i = 0; i < reply_buf_len; i++) 619 for (i = 0; i < reply_buf_len; i++)
620 reply_buf[i] = rdebi(av7110, DEBINOSWAP, COM_BUFF + 2 * i, 0, 2); 620 reply_buf[i] = rdebi(av7110, DEBINOSWAP, COM_BUFF + 2 * i, 0, 2);
621 621
622 up(&av7110->dcomlock); 622 mutex_unlock(&av7110->dcomlock);
623 return 0; 623 return 0;
624} 624}
625 625
@@ -735,7 +735,7 @@ static int FlushText(struct av7110 *av7110)
735 unsigned long start; 735 unsigned long start;
736 int err; 736 int err;
737 737
738 if (down_interruptible(&av7110->dcomlock)) 738 if (mutex_lock_interruptible(&av7110->dcomlock))
739 return -ERESTARTSYS; 739 return -ERESTARTSYS;
740 start = jiffies; 740 start = jiffies;
741 while (1) { 741 while (1) {
@@ -745,12 +745,12 @@ static int FlushText(struct av7110 *av7110)
745 if (err) { 745 if (err) {
746 printk(KERN_ERR "dvb-ttpci: %s(): timeout waiting for BUFF1_BASE == 0\n", 746 printk(KERN_ERR "dvb-ttpci: %s(): timeout waiting for BUFF1_BASE == 0\n",
747 __FUNCTION__); 747 __FUNCTION__);
748 up(&av7110->dcomlock); 748 mutex_unlock(&av7110->dcomlock);
749 return -ETIMEDOUT; 749 return -ETIMEDOUT;
750 } 750 }
751 msleep(1); 751 msleep(1);
752 } 752 }
753 up(&av7110->dcomlock); 753 mutex_unlock(&av7110->dcomlock);
754 return 0; 754 return 0;
755} 755}
756 756
@@ -761,7 +761,7 @@ static int WriteText(struct av7110 *av7110, u8 win, u16 x, u16 y, u8* buf)
761 int length = strlen(buf) + 1; 761 int length = strlen(buf) + 1;
762 u16 cbuf[5] = { (COMTYPE_OSD << 8) + DText, 3, win, x, y }; 762 u16 cbuf[5] = { (COMTYPE_OSD << 8) + DText, 3, win, x, y };
763 763
764 if (down_interruptible(&av7110->dcomlock)) 764 if (mutex_lock_interruptible(&av7110->dcomlock))
765 return -ERESTARTSYS; 765 return -ERESTARTSYS;
766 766
767 start = jiffies; 767 start = jiffies;
@@ -772,7 +772,7 @@ static int WriteText(struct av7110 *av7110, u8 win, u16 x, u16 y, u8* buf)
772 if (ret) { 772 if (ret) {
773 printk(KERN_ERR "dvb-ttpci: %s: timeout waiting for BUFF1_BASE == 0\n", 773 printk(KERN_ERR "dvb-ttpci: %s: timeout waiting for BUFF1_BASE == 0\n",
774 __FUNCTION__); 774 __FUNCTION__);
775 up(&av7110->dcomlock); 775 mutex_unlock(&av7110->dcomlock);
776 return -ETIMEDOUT; 776 return -ETIMEDOUT;
777 } 777 }
778 msleep(1); 778 msleep(1);
@@ -786,7 +786,7 @@ static int WriteText(struct av7110 *av7110, u8 win, u16 x, u16 y, u8* buf)
786 if (ret) { 786 if (ret) {
787 printk(KERN_ERR "dvb-ttpci: %s: timeout waiting for HANDSHAKE_REG\n", 787 printk(KERN_ERR "dvb-ttpci: %s: timeout waiting for HANDSHAKE_REG\n",
788 __FUNCTION__); 788 __FUNCTION__);
789 up(&av7110->dcomlock); 789 mutex_unlock(&av7110->dcomlock);
790 return -ETIMEDOUT; 790 return -ETIMEDOUT;
791 } 791 }
792 msleep(1); 792 msleep(1);
@@ -798,7 +798,7 @@ static int WriteText(struct av7110 *av7110, u8 win, u16 x, u16 y, u8* buf)
798 if (length & 1) 798 if (length & 1)
799 wdebi(av7110, DEBINOSWAP, BUFF1_BASE + i * 2, 0, 2); 799 wdebi(av7110, DEBINOSWAP, BUFF1_BASE + i * 2, 0, 2);
800 ret = __av7110_send_fw_cmd(av7110, cbuf, 5); 800 ret = __av7110_send_fw_cmd(av7110, cbuf, 5);
801 up(&av7110->dcomlock); 801 mutex_unlock(&av7110->dcomlock);
802 if (ret && ret!=-ERESTARTSYS) 802 if (ret && ret!=-ERESTARTSYS)
803 printk(KERN_ERR "dvb-ttpci: WriteText error %d\n", ret); 803 printk(KERN_ERR "dvb-ttpci: WriteText error %d\n", ret);
804 return ret; 804 return ret;
@@ -1062,7 +1062,7 @@ int av7110_osd_cmd(struct av7110 *av7110, osd_cmd_t *dc)
1062{ 1062{
1063 int ret; 1063 int ret;
1064 1064
1065 if (down_interruptible(&av7110->osd_sema)) 1065 if (mutex_lock_interruptible(&av7110->osd_mutex))
1066 return -ERESTARTSYS; 1066 return -ERESTARTSYS;
1067 1067
1068 switch (dc->cmd) { 1068 switch (dc->cmd) {
@@ -1198,7 +1198,7 @@ int av7110_osd_cmd(struct av7110 *av7110, osd_cmd_t *dc)
1198 break; 1198 break;
1199 } 1199 }
1200 1200
1201 up(&av7110->osd_sema); 1201 mutex_unlock(&av7110->osd_mutex);
1202 if (ret==-ERESTARTSYS) 1202 if (ret==-ERESTARTSYS)
1203 dprintk(1, "av7110_osd_cmd(%d) returns with -ERESTARTSYS\n",dc->cmd); 1203 dprintk(1, "av7110_osd_cmd(%d) returns with -ERESTARTSYS\n",dc->cmd);
1204 else if (ret) 1204 else if (ret)
diff --git a/drivers/media/dvb/ttpci/av7110_v4l.c b/drivers/media/dvb/ttpci/av7110_v4l.c
index 94cf38c7e8a8..2f23ceab8d44 100644
--- a/drivers/media/dvb/ttpci/av7110_v4l.c
+++ b/drivers/media/dvb/ttpci/av7110_v4l.c
@@ -579,14 +579,11 @@ static ssize_t av7110_vbi_write(struct file *file, const char __user *data, size
579 return -EFAULT; 579 return -EFAULT;
580 if ((d.id != 0 && d.id != V4L2_SLICED_WSS_625) || d.field != 0 || d.line != 23) 580 if ((d.id != 0 && d.id != V4L2_SLICED_WSS_625) || d.field != 0 || d.line != 23)
581 return -EINVAL; 581 return -EINVAL;
582 if (d.id) { 582 if (d.id)
583 av7110->wssData = ((d.data[1] << 8) & 0x3f00) | d.data[0]; 583 av7110->wssData = ((d.data[1] << 8) & 0x3f00) | d.data[0];
584 rc = av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetWSSConfig, 584 else
585 2, 1, av7110->wssData); 585 av7110->wssData = 0x8000;
586 } else { 586 rc = av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetWSSConfig, 2, 1, av7110->wssData);
587 av7110->wssData = 0;
588 rc = av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetWSSConfig, 1, 0);
589 }
590 return (rc < 0) ? rc : count; 587 return (rc < 0) ? rc : count;
591} 588}
592 589
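With the rewrite above, disabling WSS insertion is expressed through the data word itself instead of a separate short command; a two-line sketch of the values now sent through the single av7110_fw_cmd(..., SetWSSConfig, 2, 1, wssData) call:

av7110->wssData = ((d.data[1] << 8) & 0x3f00) | d.data[0];	/* enabled, payload from VBI data */
av7110->wssData = 0x8000;					/* bit 15 set: insertion disabled */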
diff --git a/drivers/media/dvb/ttpci/budget-av.c b/drivers/media/dvb/ttpci/budget-av.c
index 1465c04e49aa..9dd4745f5312 100644
--- a/drivers/media/dvb/ttpci/budget-av.c
+++ b/drivers/media/dvb/ttpci/budget-av.c
@@ -1000,6 +1000,7 @@ static u8 read_pwm(struct budget_av *budget_av)
1000 1000
1001#define SUBID_DVBS_TV_STAR 0x0014 1001#define SUBID_DVBS_TV_STAR 0x0014
1002#define SUBID_DVBS_TV_STAR_CI 0x0016 1002#define SUBID_DVBS_TV_STAR_CI 0x0016
1003#define SUBID_DVBS_EASYWATCH 0x001e
1003#define SUBID_DVBC_KNC1 0x0020 1004#define SUBID_DVBC_KNC1 0x0020
1004#define SUBID_DVBC_KNC1_PLUS 0x0021 1005#define SUBID_DVBC_KNC1_PLUS 0x0021
1005#define SUBID_DVBC_CINERGY1200 0x1156 1006#define SUBID_DVBC_CINERGY1200 0x1156
@@ -1038,6 +1039,7 @@ static void frontend_init(struct budget_av *budget_av)
1038 case SUBID_DVBS_TV_STAR: 1039 case SUBID_DVBS_TV_STAR:
1039 case SUBID_DVBS_TV_STAR_CI: 1040 case SUBID_DVBS_TV_STAR_CI:
1040 case SUBID_DVBS_CYNERGY1200N: 1041 case SUBID_DVBS_CYNERGY1200N:
1042 case SUBID_DVBS_EASYWATCH:
1041 fe = stv0299_attach(&philips_sd1878_config, 1043 fe = stv0299_attach(&philips_sd1878_config,
1042 &budget_av->budget.i2c_adap); 1044 &budget_av->budget.i2c_adap);
1043 break; 1045 break;
@@ -1285,6 +1287,7 @@ MAKE_BUDGET_INFO(knc1s, "KNC1 DVB-S", BUDGET_KNC1S);
1285MAKE_BUDGET_INFO(knc1c, "KNC1 DVB-C", BUDGET_KNC1C); 1287MAKE_BUDGET_INFO(knc1c, "KNC1 DVB-C", BUDGET_KNC1C);
1286MAKE_BUDGET_INFO(knc1t, "KNC1 DVB-T", BUDGET_KNC1T); 1288MAKE_BUDGET_INFO(knc1t, "KNC1 DVB-T", BUDGET_KNC1T);
1287MAKE_BUDGET_INFO(kncxs, "KNC TV STAR DVB-S", BUDGET_TVSTAR); 1289MAKE_BUDGET_INFO(kncxs, "KNC TV STAR DVB-S", BUDGET_TVSTAR);
1290MAKE_BUDGET_INFO(satewpls, "Satelco EasyWatch DVB-S light", BUDGET_TVSTAR);
1288MAKE_BUDGET_INFO(knc1sp, "KNC1 DVB-S Plus", BUDGET_KNC1SP); 1291MAKE_BUDGET_INFO(knc1sp, "KNC1 DVB-S Plus", BUDGET_KNC1SP);
1289MAKE_BUDGET_INFO(knc1cp, "KNC1 DVB-C Plus", BUDGET_KNC1CP); 1292MAKE_BUDGET_INFO(knc1cp, "KNC1 DVB-C Plus", BUDGET_KNC1CP);
1290MAKE_BUDGET_INFO(knc1tp, "KNC1 DVB-T Plus", BUDGET_KNC1TP); 1293MAKE_BUDGET_INFO(knc1tp, "KNC1 DVB-T Plus", BUDGET_KNC1TP);
@@ -1300,6 +1303,7 @@ static struct pci_device_id pci_tbl[] = {
1300 MAKE_EXTENSION_PCI(knc1sp, 0x1131, 0x0011), 1303 MAKE_EXTENSION_PCI(knc1sp, 0x1131, 0x0011),
1301 MAKE_EXTENSION_PCI(kncxs, 0x1894, 0x0014), 1304 MAKE_EXTENSION_PCI(kncxs, 0x1894, 0x0014),
1302 MAKE_EXTENSION_PCI(kncxs, 0x1894, 0x0016), 1305 MAKE_EXTENSION_PCI(kncxs, 0x1894, 0x0016),
1306 MAKE_EXTENSION_PCI(satewpls, 0x1894, 0x001e),
1303 MAKE_EXTENSION_PCI(knc1c, 0x1894, 0x0020), 1307 MAKE_EXTENSION_PCI(knc1c, 0x1894, 0x0020),
1304 MAKE_EXTENSION_PCI(knc1cp, 0x1894, 0x0021), 1308 MAKE_EXTENSION_PCI(knc1cp, 0x1894, 0x0021),
1305 MAKE_EXTENSION_PCI(knc1t, 0x1894, 0x0030), 1309 MAKE_EXTENSION_PCI(knc1t, 0x1894, 0x0030),
diff --git a/drivers/media/dvb/ttpci/budget-ci.c b/drivers/media/dvb/ttpci/budget-ci.c
index b9b3cd9c0369..5f91036f5b87 100644
--- a/drivers/media/dvb/ttpci/budget-ci.c
+++ b/drivers/media/dvb/ttpci/budget-ci.c
@@ -42,6 +42,9 @@
42#include "stv0299.h" 42#include "stv0299.h"
43#include "stv0297.h" 43#include "stv0297.h"
44#include "tda1004x.h" 44#include "tda1004x.h"
45#include "lnbp21.h"
46#include "bsbe1.h"
47#include "bsru6.h"
45 48
46#define DEBIADDR_IR 0x1234 49#define DEBIADDR_IR 0x1234
47#define DEBIADDR_CICONTROL 0x0000 50#define DEBIADDR_CICONTROL 0x0000
@@ -474,123 +477,6 @@ static void budget_ci_irq(struct saa7146_dev *dev, u32 * isr)
474 tasklet_schedule(&budget_ci->ciintf_irq_tasklet); 477 tasklet_schedule(&budget_ci->ciintf_irq_tasklet);
475} 478}
476 479
477
478static u8 alps_bsru6_inittab[] = {
479 0x01, 0x15,
480 0x02, 0x00,
481 0x03, 0x00,
482 0x04, 0x7d, /* F22FR = 0x7d, F22 = f_VCO / 128 / 0x7d = 22 kHz */
483 0x05, 0x35, /* I2CT = 0, SCLT = 1, SDAT = 1 */
484 0x06, 0x40, /* DAC not used, set to high impendance mode */
485 0x07, 0x00, /* DAC LSB */
486 0x08, 0x40, /* DiSEqC off, LNB power on OP2/LOCK pin on */
487 0x09, 0x00, /* FIFO */
488 0x0c, 0x51, /* OP1 ctl = Normal, OP1 val = 1 (LNB Power ON) */
489 0x0d, 0x82, /* DC offset compensation = ON, beta_agc1 = 2 */
490 0x0e, 0x23, /* alpha_tmg = 2, beta_tmg = 3 */
491 0x10, 0x3f, // AGC2 0x3d
492 0x11, 0x84,
493 0x12, 0xb9,
494 0x15, 0xc9, // lock detector threshold
495 0x16, 0x00,
496 0x17, 0x00,
497 0x18, 0x00,
498 0x19, 0x00,
499 0x1a, 0x00,
500 0x1f, 0x50,
501 0x20, 0x00,
502 0x21, 0x00,
503 0x22, 0x00,
504 0x23, 0x00,
505 0x28, 0x00, // out imp: normal out type: parallel FEC mode:0
506 0x29, 0x1e, // 1/2 threshold
507 0x2a, 0x14, // 2/3 threshold
508 0x2b, 0x0f, // 3/4 threshold
509 0x2c, 0x09, // 5/6 threshold
510 0x2d, 0x05, // 7/8 threshold
511 0x2e, 0x01,
512 0x31, 0x1f, // test all FECs
513 0x32, 0x19, // viterbi and synchro search
514 0x33, 0xfc, // rs control
515 0x34, 0x93, // error control
516 0x0f, 0x52,
517 0xff, 0xff
518};
519
520static int alps_bsru6_set_symbol_rate(struct dvb_frontend *fe, u32 srate, u32 ratio)
521{
522 u8 aclk = 0;
523 u8 bclk = 0;
524
525 if (srate < 1500000) {
526 aclk = 0xb7;
527 bclk = 0x47;
528 } else if (srate < 3000000) {
529 aclk = 0xb7;
530 bclk = 0x4b;
531 } else if (srate < 7000000) {
532 aclk = 0xb7;
533 bclk = 0x4f;
534 } else if (srate < 14000000) {
535 aclk = 0xb7;
536 bclk = 0x53;
537 } else if (srate < 30000000) {
538 aclk = 0xb6;
539 bclk = 0x53;
540 } else if (srate < 45000000) {
541 aclk = 0xb4;
542 bclk = 0x51;
543 }
544
545 stv0299_writereg(fe, 0x13, aclk);
546 stv0299_writereg(fe, 0x14, bclk);
547 stv0299_writereg(fe, 0x1f, (ratio >> 16) & 0xff);
548 stv0299_writereg(fe, 0x20, (ratio >> 8) & 0xff);
549 stv0299_writereg(fe, 0x21, (ratio) & 0xf0);
550
551 return 0;
552}
553
554static int alps_bsru6_pll_set(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct dvb_frontend_parameters *params)
555{
556 u8 buf[4];
557 u32 div;
558 struct i2c_msg msg = {.addr = 0x61,.flags = 0,.buf = buf,.len = sizeof(buf) };
559
560 if ((params->frequency < 950000) || (params->frequency > 2150000))
561 return -EINVAL;
562
563 div = (params->frequency + (125 - 1)) / 125; // round correctly
564 buf[0] = (div >> 8) & 0x7f;
565 buf[1] = div & 0xff;
566 buf[2] = 0x80 | ((div & 0x18000) >> 10) | 4;
567 buf[3] = 0xC4;
568
569 if (params->frequency > 1530000)
570 buf[3] = 0xc0;
571
572 if (i2c_transfer(i2c, &msg, 1) != 1)
573 return -EIO;
574 return 0;
575}
576
577static struct stv0299_config alps_bsru6_config = {
578
579 .demod_address = 0x68,
580 .inittab = alps_bsru6_inittab,
581 .mclk = 88000000UL,
582 .invert = 1,
583 .skip_reinit = 0,
584 .lock_output = STV0229_LOCKOUTPUT_1,
585 .volt13_op0_op1 = STV0299_VOLT13_OP1,
586 .min_delay_ms = 100,
587 .set_symbol_rate = alps_bsru6_set_symbol_rate,
588 .pll_set = alps_bsru6_pll_set,
589};
590
591
592
593
594static u8 philips_su1278_tt_inittab[] = { 480static u8 philips_su1278_tt_inittab[] = {
595 0x01, 0x0f, 481 0x01, 0x0f,
596 0x02, 0x30, 482 0x02, 0x30,
@@ -1069,6 +955,20 @@ static void frontend_init(struct budget_ci *budget_ci)
1069 break; 955 break;
1070 } 956 }
1071 break; 957 break;
958
959 case 0x1017: // TT S-1500 PCI
960 budget_ci->budget.dvb_frontend = stv0299_attach(&alps_bsbe1_config, &budget_ci->budget.i2c_adap);
961 if (budget_ci->budget.dvb_frontend) {
962 budget_ci->budget.dvb_frontend->ops->dishnetwork_send_legacy_command = NULL;
963 if (lnbp21_init(budget_ci->budget.dvb_frontend, &budget_ci->budget.i2c_adap, LNBP21_LLC, 0)) {
964 printk("%s: No LNBP21 found!\n", __FUNCTION__);
965 if (budget_ci->budget.dvb_frontend->ops->release)
966 budget_ci->budget.dvb_frontend->ops->release(budget_ci->budget.dvb_frontend);
967 budget_ci->budget.dvb_frontend = NULL;
968 }
969 }
970
971 break;
1072 } 972 }
1073 973
1074 if (budget_ci->budget.dvb_frontend == NULL) { 974 if (budget_ci->budget.dvb_frontend == NULL) {
@@ -1146,6 +1046,7 @@ static int budget_ci_detach(struct saa7146_dev *dev)
1146 1046
1147static struct saa7146_extension budget_extension; 1047static struct saa7146_extension budget_extension;
1148 1048
1049MAKE_BUDGET_INFO(ttbs2, "TT-Budget/S-1500 PCI", BUDGET_TT);
1149MAKE_BUDGET_INFO(ttbci, "TT-Budget/WinTV-NOVA-CI PCI", BUDGET_TT_HW_DISEQC); 1050MAKE_BUDGET_INFO(ttbci, "TT-Budget/WinTV-NOVA-CI PCI", BUDGET_TT_HW_DISEQC);
1150MAKE_BUDGET_INFO(ttbt2, "TT-Budget/WinTV-NOVA-T PCI", BUDGET_TT); 1051MAKE_BUDGET_INFO(ttbt2, "TT-Budget/WinTV-NOVA-T PCI", BUDGET_TT);
1151MAKE_BUDGET_INFO(ttbtci, "TT-Budget-T-CI PCI", BUDGET_TT); 1052MAKE_BUDGET_INFO(ttbtci, "TT-Budget-T-CI PCI", BUDGET_TT);
@@ -1157,6 +1058,7 @@ static struct pci_device_id pci_tbl[] = {
1157 MAKE_EXTENSION_PCI(ttbcci, 0x13c2, 0x1010), 1058 MAKE_EXTENSION_PCI(ttbcci, 0x13c2, 0x1010),
1158 MAKE_EXTENSION_PCI(ttbt2, 0x13c2, 0x1011), 1059 MAKE_EXTENSION_PCI(ttbt2, 0x13c2, 0x1011),
1159 MAKE_EXTENSION_PCI(ttbtci, 0x13c2, 0x1012), 1060 MAKE_EXTENSION_PCI(ttbtci, 0x13c2, 0x1012),
1061 MAKE_EXTENSION_PCI(ttbs2, 0x13c2, 0x1017),
1160 { 1062 {
1161 .vendor = 0, 1063 .vendor = 0,
1162 } 1064 }
diff --git a/drivers/media/dvb/ttpci/budget-patch.c b/drivers/media/dvb/ttpci/budget-patch.c
index fc416cf5253c..9fc9185a8426 100644
--- a/drivers/media/dvb/ttpci/budget-patch.c
+++ b/drivers/media/dvb/ttpci/budget-patch.c
@@ -37,6 +37,8 @@
37#include "ves1x93.h" 37#include "ves1x93.h"
38#include "tda8083.h" 38#include "tda8083.h"
39 39
40#include "bsru6.h"
41
40#define budget_patch budget 42#define budget_patch budget
41 43
42static struct saa7146_extension budget_extension; 44static struct saa7146_extension budget_extension;
@@ -290,103 +292,6 @@ static struct ves1x93_config alps_bsrv2_config = {
290 .pll_set = alps_bsrv2_pll_set, 292 .pll_set = alps_bsrv2_pll_set,
291}; 293};
292 294
293static u8 alps_bsru6_inittab[] = {
294 0x01, 0x15,
295 0x02, 0x00,
296 0x03, 0x00,
297 0x04, 0x7d, /* F22FR = 0x7d, F22 = f_VCO / 128 / 0x7d = 22 kHz */
298 0x05, 0x35, /* I2CT = 0, SCLT = 1, SDAT = 1 */
299 0x06, 0x40, /* DAC not used, set to high impendance mode */
300 0x07, 0x00, /* DAC LSB */
301 0x08, 0x40, /* DiSEqC off, LNB power on OP2/LOCK pin on */
302 0x09, 0x00, /* FIFO */
303 0x0c, 0x51, /* OP1 ctl = Normal, OP1 val = 1 (LNB Power ON) */
304 0x0d, 0x82, /* DC offset compensation = ON, beta_agc1 = 2 */
305 0x0e, 0x23, /* alpha_tmg = 2, beta_tmg = 3 */
306 0x10, 0x3f, // AGC2 0x3d
307 0x11, 0x84,
308 0x12, 0xb9,
309 0x15, 0xc9, // lock detector threshold
310 0x16, 0x00,
311 0x17, 0x00,
312 0x18, 0x00,
313 0x19, 0x00,
314 0x1a, 0x00,
315 0x1f, 0x50,
316 0x20, 0x00,
317 0x21, 0x00,
318 0x22, 0x00,
319 0x23, 0x00,
320 0x28, 0x00, // out imp: normal out type: parallel FEC mode:0
321 0x29, 0x1e, // 1/2 threshold
322 0x2a, 0x14, // 2/3 threshold
323 0x2b, 0x0f, // 3/4 threshold
324 0x2c, 0x09, // 5/6 threshold
325 0x2d, 0x05, // 7/8 threshold
326 0x2e, 0x01,
327 0x31, 0x1f, // test all FECs
328 0x32, 0x19, // viterbi and synchro search
329 0x33, 0xfc, // rs control
330 0x34, 0x93, // error control
331 0x0f, 0x52,
332 0xff, 0xff
333};
334
335static int alps_bsru6_set_symbol_rate(struct dvb_frontend* fe, u32 srate, u32 ratio)
336{
337 u8 aclk = 0;
338 u8 bclk = 0;
339
340 if (srate < 1500000) { aclk = 0xb7; bclk = 0x47; }
341 else if (srate < 3000000) { aclk = 0xb7; bclk = 0x4b; }
342 else if (srate < 7000000) { aclk = 0xb7; bclk = 0x4f; }
343 else if (srate < 14000000) { aclk = 0xb7; bclk = 0x53; }
344 else if (srate < 30000000) { aclk = 0xb6; bclk = 0x53; }
345 else if (srate < 45000000) { aclk = 0xb4; bclk = 0x51; }
346
347 stv0299_writereg (fe, 0x13, aclk);
348 stv0299_writereg (fe, 0x14, bclk);
349 stv0299_writereg (fe, 0x1f, (ratio >> 16) & 0xff);
350 stv0299_writereg (fe, 0x20, (ratio >> 8) & 0xff);
351 stv0299_writereg (fe, 0x21, (ratio ) & 0xf0);
352
353 return 0;
354}
355
356static int alps_bsru6_pll_set(struct dvb_frontend* fe, struct i2c_adapter *i2c, struct dvb_frontend_parameters* params)
357{
358 u8 data[4];
359 u32 div;
360 struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = data, .len = sizeof(data) };
361
362 if ((params->frequency < 950000) || (params->frequency > 2150000)) return -EINVAL;
363
364 div = (params->frequency + (125 - 1)) / 125; // round correctly
365 data[0] = (div >> 8) & 0x7f;
366 data[1] = div & 0xff;
367 data[2] = 0x80 | ((div & 0x18000) >> 10) | 4;
368 data[3] = 0xC4;
369
370 if (params->frequency > 1530000) data[3] = 0xc0;
371
372 if (i2c_transfer(i2c, &msg, 1) != 1) return -EIO;
373 return 0;
374}
375
376static struct stv0299_config alps_bsru6_config = {
377
378 .demod_address = 0x68,
379 .inittab = alps_bsru6_inittab,
380 .mclk = 88000000UL,
381 .invert = 1,
382 .skip_reinit = 0,
383 .lock_output = STV0229_LOCKOUTPUT_1,
384 .volt13_op0_op1 = STV0299_VOLT13_OP1,
385 .min_delay_ms = 100,
386 .set_symbol_rate = alps_bsru6_set_symbol_rate,
387 .pll_set = alps_bsru6_pll_set,
388};
389
390static int grundig_29504_451_pll_set(struct dvb_frontend* fe, struct dvb_frontend_parameters* params) 295static int grundig_29504_451_pll_set(struct dvb_frontend* fe, struct dvb_frontend_parameters* params)
391{ 296{
392 struct budget_patch* budget = (struct budget_patch*) fe->dvb->priv; 297 struct budget_patch* budget = (struct budget_patch*) fe->dvb->priv;
diff --git a/drivers/media/dvb/ttpci/budget.c b/drivers/media/dvb/ttpci/budget.c
index 238c77b52f89..c23c02d95641 100644
--- a/drivers/media/dvb/ttpci/budget.c
+++ b/drivers/media/dvb/ttpci/budget.c
@@ -41,6 +41,8 @@
41#include "l64781.h" 41#include "l64781.h"
42#include "tda8083.h" 42#include "tda8083.h"
43#include "s5h1420.h" 43#include "s5h1420.h"
44#include "lnbp21.h"
45#include "bsru6.h"
44 46
45static void Set22K (struct budget *budget, int state) 47static void Set22K (struct budget *budget, int state)
46{ 48{
@@ -184,64 +186,6 @@ static int budget_diseqc_send_burst(struct dvb_frontend* fe, fe_sec_mini_cmd_t m
184 return 0; 186 return 0;
185} 187}
186 188
187static int lnbp21_set_voltage(struct dvb_frontend* fe, fe_sec_voltage_t voltage)
188{
189 struct budget* budget = (struct budget*) fe->dvb->priv;
190 u8 buf;
191 struct i2c_msg msg = { .addr = 0x08, .flags = I2C_M_RD, .buf = &buf, .len = sizeof(buf) };
192
193 if (i2c_transfer (&budget->i2c_adap, &msg, 1) != 1) return -EIO;
194
195 switch(voltage) {
196 case SEC_VOLTAGE_13:
197 buf = (buf & 0xf7) | 0x04;
198 break;
199
200 case SEC_VOLTAGE_18:
201 buf = (buf & 0xf7) | 0x0c;
202 break;
203
204 case SEC_VOLTAGE_OFF:
205 buf = buf & 0xf0;
206 break;
207 }
208
209 msg.flags = 0;
210 if (i2c_transfer (&budget->i2c_adap, &msg, 1) != 1) return -EIO;
211
212 return 0;
213}
214
215static int lnbp21_enable_high_lnb_voltage(struct dvb_frontend* fe, long arg)
216{
217 struct budget* budget = (struct budget*) fe->dvb->priv;
218 u8 buf;
219 struct i2c_msg msg = { .addr = 0x08, .flags = I2C_M_RD, .buf = &buf, .len = sizeof(buf) };
220
221 if (i2c_transfer (&budget->i2c_adap, &msg, 1) != 1) return -EIO;
222
223 if (arg) {
224 buf = buf | 0x10;
225 } else {
226 buf = buf & 0xef;
227 }
228
229 msg.flags = 0;
230 if (i2c_transfer (&budget->i2c_adap, &msg, 1) != 1) return -EIO;
231
232 return 0;
233}
234
235static int lnbp21_init(struct budget* budget)
236{
237 u8 buf = 0x00;
238 struct i2c_msg msg = { .addr = 0x08, .flags = 0, .buf = &buf, .len = sizeof(buf) };
239
240 if (i2c_transfer (&budget->i2c_adap, &msg, 1) != 1)
241 return -EIO;
242 return 0;
243}
244
245static int alps_bsrv2_pll_set(struct dvb_frontend* fe, struct dvb_frontend_parameters* params) 189static int alps_bsrv2_pll_set(struct dvb_frontend* fe, struct dvb_frontend_parameters* params)
246{ 190{
247 struct budget* budget = (struct budget*) fe->dvb->priv; 191 struct budget* budget = (struct budget*) fe->dvb->priv;
@@ -277,176 +221,6 @@ static struct ves1x93_config alps_bsrv2_config =
277 .pll_set = alps_bsrv2_pll_set, 221 .pll_set = alps_bsrv2_pll_set,
278}; 222};
279 223
280static u8 alps_bsru6_inittab[] = {
281 0x01, 0x15,
282 0x02, 0x00,
283 0x03, 0x00,
284 0x04, 0x7d, /* F22FR = 0x7d, F22 = f_VCO / 128 / 0x7d = 22 kHz */
285 0x05, 0x35, /* I2CT = 0, SCLT = 1, SDAT = 1 */
286 0x06, 0x40, /* DAC not used, set to high impendance mode */
287 0x07, 0x00, /* DAC LSB */
288 0x08, 0x40, /* DiSEqC off, LNB power on OP2/LOCK pin on */
289 0x09, 0x00, /* FIFO */
290 0x0c, 0x51, /* OP1 ctl = Normal, OP1 val = 1 (LNB Power ON) */
291 0x0d, 0x82, /* DC offset compensation = ON, beta_agc1 = 2 */
292 0x0e, 0x23, /* alpha_tmg = 2, beta_tmg = 3 */
293 0x10, 0x3f, // AGC2 0x3d
294 0x11, 0x84,
295 0x12, 0xb9,
296 0x15, 0xc9, // lock detector threshold
297 0x16, 0x00,
298 0x17, 0x00,
299 0x18, 0x00,
300 0x19, 0x00,
301 0x1a, 0x00,
302 0x1f, 0x50,
303 0x20, 0x00,
304 0x21, 0x00,
305 0x22, 0x00,
306 0x23, 0x00,
307 0x28, 0x00, // out imp: normal out type: parallel FEC mode:0
308 0x29, 0x1e, // 1/2 threshold
309 0x2a, 0x14, // 2/3 threshold
310 0x2b, 0x0f, // 3/4 threshold
311 0x2c, 0x09, // 5/6 threshold
312 0x2d, 0x05, // 7/8 threshold
313 0x2e, 0x01,
314 0x31, 0x1f, // test all FECs
315 0x32, 0x19, // viterbi and synchro search
316 0x33, 0xfc, // rs control
317 0x34, 0x93, // error control
318 0x0f, 0x52,
319 0xff, 0xff
320};
321
322static int alps_bsru6_set_symbol_rate(struct dvb_frontend* fe, u32 srate, u32 ratio)
323{
324 u8 aclk = 0;
325 u8 bclk = 0;
326
327 if (srate < 1500000) { aclk = 0xb7; bclk = 0x47; }
328 else if (srate < 3000000) { aclk = 0xb7; bclk = 0x4b; }
329 else if (srate < 7000000) { aclk = 0xb7; bclk = 0x4f; }
330 else if (srate < 14000000) { aclk = 0xb7; bclk = 0x53; }
331 else if (srate < 30000000) { aclk = 0xb6; bclk = 0x53; }
332 else if (srate < 45000000) { aclk = 0xb4; bclk = 0x51; }
333
334 stv0299_writereg (fe, 0x13, aclk);
335 stv0299_writereg (fe, 0x14, bclk);
336 stv0299_writereg (fe, 0x1f, (ratio >> 16) & 0xff);
337 stv0299_writereg (fe, 0x20, (ratio >> 8) & 0xff);
338 stv0299_writereg (fe, 0x21, (ratio ) & 0xf0);
339
340 return 0;
341}
342
343static int alps_bsru6_pll_set(struct dvb_frontend* fe, struct i2c_adapter *i2c, struct dvb_frontend_parameters* params)
344{
345 u8 data[4];
346 u32 div;
347 struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = data, .len = sizeof(data) };
348
349 if ((params->frequency < 950000) || (params->frequency > 2150000)) return -EINVAL;
350
351 div = (params->frequency + (125 - 1)) / 125; // round correctly
352 data[0] = (div >> 8) & 0x7f;
353 data[1] = div & 0xff;
354 data[2] = 0x80 | ((div & 0x18000) >> 10) | 4;
355 data[3] = 0xC4;
356
357 if (params->frequency > 1530000) data[3] = 0xc0;
358
359 if (i2c_transfer(i2c, &msg, 1) != 1) return -EIO;
360 return 0;
361}
362
363static struct stv0299_config alps_bsru6_config = {
364
365 .demod_address = 0x68,
366 .inittab = alps_bsru6_inittab,
367 .mclk = 88000000UL,
368 .invert = 1,
369 .skip_reinit = 0,
370 .lock_output = STV0229_LOCKOUTPUT_1,
371 .volt13_op0_op1 = STV0299_VOLT13_OP1,
372 .min_delay_ms = 100,
373 .set_symbol_rate = alps_bsru6_set_symbol_rate,
374 .pll_set = alps_bsru6_pll_set,
375};
376
377static u8 alps_bsbe1_inittab[] = {
378 0x01, 0x15,
379 0x02, 0x30,
380 0x03, 0x00,
381 0x04, 0x7d, /* F22FR = 0x7d, F22 = f_VCO / 128 / 0x7d = 22 kHz */
382 0x05, 0x35, /* I2CT = 0, SCLT = 1, SDAT = 1 */
383 0x06, 0x40, /* DAC not used, set to high impendance mode */
384 0x07, 0x00, /* DAC LSB */
385 0x08, 0x40, /* DiSEqC off, LNB power on OP2/LOCK pin on */
386 0x09, 0x00, /* FIFO */
387 0x0c, 0x51, /* OP1 ctl = Normal, OP1 val = 1 (LNB Power ON) */
388 0x0d, 0x82, /* DC offset compensation = ON, beta_agc1 = 2 */
389 0x0e, 0x23, /* alpha_tmg = 2, beta_tmg = 3 */
390 0x10, 0x3f, // AGC2 0x3d
391 0x11, 0x84,
392 0x12, 0xb9,
393 0x15, 0xc9, // lock detector threshold
394 0x16, 0x00,
395 0x17, 0x00,
396 0x18, 0x00,
397 0x19, 0x00,
398 0x1a, 0x00,
399 0x1f, 0x50,
400 0x20, 0x00,
401 0x21, 0x00,
402 0x22, 0x00,
403 0x23, 0x00,
404 0x28, 0x00, // out imp: normal out type: parallel FEC mode:0
405 0x29, 0x1e, // 1/2 threshold
406 0x2a, 0x14, // 2/3 threshold
407 0x2b, 0x0f, // 3/4 threshold
408 0x2c, 0x09, // 5/6 threshold
409 0x2d, 0x05, // 7/8 threshold
410 0x2e, 0x01,
411 0x31, 0x1f, // test all FECs
412 0x32, 0x19, // viterbi and synchro search
413 0x33, 0xfc, // rs control
414 0x34, 0x93, // error control
415 0x0f, 0x92, // 0x80 = inverse AGC
416 0xff, 0xff
417};
418
419static int alps_bsbe1_pll_set(struct dvb_frontend* fe, struct i2c_adapter *i2c, struct dvb_frontend_parameters* params)
420{
421 int ret;
422 u8 data[4];
423 u32 div;
424 struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = data, .len = sizeof(data) };
425
426 if ((params->frequency < 950000) || (params->frequency > 2150000))
427 return -EINVAL;
428
429 div = (params->frequency + (125 - 1)) / 125; // round correctly
430 data[0] = (div >> 8) & 0x7f;
431 data[1] = div & 0xff;
432 data[2] = 0x80 | ((div & 0x18000) >> 10) | 4;
433 data[3] = (params->frequency > 1530000) ? 0xE0 : 0xE4;
434
435 ret = i2c_transfer(i2c, &msg, 1);
436 return (ret != 1) ? -EIO : 0;
437}
438
439static struct stv0299_config alps_bsbe1_config = {
440 .demod_address = 0x68,
441 .inittab = alps_bsbe1_inittab,
442 .mclk = 88000000UL,
443 .invert = 1,
444 .skip_reinit = 0,
445 .min_delay_ms = 100,
446 .set_symbol_rate = alps_bsru6_set_symbol_rate,
447 .pll_set = alps_bsbe1_pll_set,
448};
449
450static int alps_tdbe2_pll_set(struct dvb_frontend* fe, struct dvb_frontend_parameters* params) 224static int alps_tdbe2_pll_set(struct dvb_frontend* fe, struct dvb_frontend_parameters* params)
451{ 225{
452 struct budget* budget = (struct budget*) fe->dvb->priv; 226 struct budget* budget = (struct budget*) fe->dvb->priv;
@@ -580,20 +354,6 @@ static u8 read_pwm(struct budget* budget)
580static void frontend_init(struct budget *budget) 354static void frontend_init(struct budget *budget)
581{ 355{
582 switch(budget->dev->pci->subsystem_device) { 356 switch(budget->dev->pci->subsystem_device) {
583 case 0x1017:
584 // try the ALPS BSBE1 now
585 budget->dvb_frontend = stv0299_attach(&alps_bsbe1_config, &budget->i2c_adap);
586 if (budget->dvb_frontend) {
587 budget->dvb_frontend->ops->set_voltage = lnbp21_set_voltage;
588 budget->dvb_frontend->ops->enable_high_lnb_voltage = lnbp21_enable_high_lnb_voltage;
589 budget->dvb_frontend->ops->dishnetwork_send_legacy_command = NULL;
590 if (lnbp21_init(budget)) {
591 printk("%s: No LNBP21 found!\n", __FUNCTION__);
592 goto error_out;
593 }
594 }
595
596 break;
597 case 0x1003: // Hauppauge/TT Nova budget (stv0299/ALPS BSRU6(tsa5059) OR ves1893/ALPS BSRV2(sp5659)) 357 case 0x1003: // Hauppauge/TT Nova budget (stv0299/ALPS BSRU6(tsa5059) OR ves1893/ALPS BSRV2(sp5659))
598 case 0x1013: 358 case 0x1013:
599 // try the ALPS BSRV2 first of all 359 // try the ALPS BSRV2 first of all
@@ -646,9 +406,7 @@ static void frontend_init(struct budget *budget)
646 case 0x1016: // Hauppauge/TT Nova-S SE (samsung s5h1420/????(tda8260)) 406 case 0x1016: // Hauppauge/TT Nova-S SE (samsung s5h1420/????(tda8260))
647 budget->dvb_frontend = s5h1420_attach(&s5h1420_config, &budget->i2c_adap); 407 budget->dvb_frontend = s5h1420_attach(&s5h1420_config, &budget->i2c_adap);
648 if (budget->dvb_frontend) { 408 if (budget->dvb_frontend) {
649 budget->dvb_frontend->ops->set_voltage = lnbp21_set_voltage; 409 if (lnbp21_init(budget->dvb_frontend, &budget->i2c_adap, 0, 0)) {
650 budget->dvb_frontend->ops->enable_high_lnb_voltage = lnbp21_enable_high_lnb_voltage;
651 if (lnbp21_init(budget)) {
652 printk("%s: No LNBP21 found!\n", __FUNCTION__); 410 printk("%s: No LNBP21 found!\n", __FUNCTION__);
653 goto error_out; 411 goto error_out;
654 } 412 }
@@ -719,7 +477,6 @@ static int budget_detach (struct saa7146_dev* dev)
719 477
720static struct saa7146_extension budget_extension; 478static struct saa7146_extension budget_extension;
721 479
722MAKE_BUDGET_INFO(ttbs2, "TT-Budget/WinTV-NOVA-S PCI (rev AL/alps bsbe1 lnbp21 frontend)", BUDGET_TT);
723MAKE_BUDGET_INFO(ttbs, "TT-Budget/WinTV-NOVA-S PCI", BUDGET_TT); 480MAKE_BUDGET_INFO(ttbs, "TT-Budget/WinTV-NOVA-S PCI", BUDGET_TT);
724MAKE_BUDGET_INFO(ttbc, "TT-Budget/WinTV-NOVA-C PCI", BUDGET_TT); 481MAKE_BUDGET_INFO(ttbc, "TT-Budget/WinTV-NOVA-C PCI", BUDGET_TT);
725MAKE_BUDGET_INFO(ttbt, "TT-Budget/WinTV-NOVA-T PCI", BUDGET_TT); 482MAKE_BUDGET_INFO(ttbt, "TT-Budget/WinTV-NOVA-T PCI", BUDGET_TT);
@@ -732,7 +489,6 @@ static struct pci_device_id pci_tbl[] = {
732 MAKE_EXTENSION_PCI(ttbc, 0x13c2, 0x1004), 489 MAKE_EXTENSION_PCI(ttbc, 0x13c2, 0x1004),
733 MAKE_EXTENSION_PCI(ttbt, 0x13c2, 0x1005), 490 MAKE_EXTENSION_PCI(ttbt, 0x13c2, 0x1005),
734 MAKE_EXTENSION_PCI(satel, 0x13c2, 0x1013), 491 MAKE_EXTENSION_PCI(satel, 0x13c2, 0x1013),
735 MAKE_EXTENSION_PCI(ttbs2, 0x13c2, 0x1017),
736 MAKE_EXTENSION_PCI(ttbs, 0x13c2, 0x1016), 492 MAKE_EXTENSION_PCI(ttbs, 0x13c2, 0x1016),
737 MAKE_EXTENSION_PCI(fsacs1,0x1131, 0x4f60), 493 MAKE_EXTENSION_PCI(fsacs1,0x1131, 0x4f60),
738 MAKE_EXTENSION_PCI(fsacs0,0x1131, 0x4f61), 494 MAKE_EXTENSION_PCI(fsacs0,0x1131, 0x4f61),
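The ALPS BSRU6 register table, symbol-rate and PLL routines deleted above from both budget-patch.c and budget.c move into the shared bsru6.h header that the two files now include, and the open-coded LNBP21 voltage handling is replaced by the common lnbp21 helper attached through the four-argument lnbp21_init() call visible in the 0x1016 case. A minimal sketch of the consolidated attach path, assuming bsru6.h keeps exporting the same alps_bsru6_config name as the removed code (the function name below is hypothetical):

	#include "bsru6.h"	/* shared ALPS BSRU6 inittab + stv0299 config */
	#include "lnbp21.h"	/* shared LNBP21 LNB supply helper            */

	static void example_attach_bsru6(struct budget *budget)
	{
		budget->dvb_frontend =
			stv0299_attach(&alps_bsru6_config, &budget->i2c_adap);
		if (budget->dvb_frontend &&
		    lnbp21_init(budget->dvb_frontend, &budget->i2c_adap, 0, 0)) {
			/* no LNBP21 answered on the bus; the real driver
			 * jumps to its error_out label at this point */
			printk("%s: No LNBP21 found!\n", __FUNCTION__);
		}
	}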
diff --git a/drivers/media/dvb/ttpci/budget.h b/drivers/media/dvb/ttpci/budget.h
index c7bb63c4d98d..4ac0f4d08025 100644
--- a/drivers/media/dvb/ttpci/budget.h
+++ b/drivers/media/dvb/ttpci/budget.h
@@ -10,6 +10,8 @@
10#include "dvb_net.h" 10#include "dvb_net.h"
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/mutex.h>
14
13#include <media/saa7146.h> 15#include <media/saa7146.h>
14 16
15extern int budget_debug; 17extern int budget_debug;
@@ -51,7 +53,7 @@ struct budget {
51 struct dmx_frontend mem_frontend; 53 struct dmx_frontend mem_frontend;
52 54
53 int fe_synced; 55 int fe_synced;
54 struct semaphore pid_mutex; 56 struct mutex pid_mutex;
55 57
56 int ci_present; 58 int ci_present;
57 int video_port; 59 int video_port;
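The pid_mutex change above is the first of a long run of hunks in this commit converting driver locks from <asm/semaphore.h> semaphores to <linux/mutex.h> mutexes; the same substitution (init_MUTEX/down/up becoming mutex_init/mutex_lock/mutex_unlock, down_interruptible becoming mutex_lock_interruptible) repeats in the ttusb, ttusb-dec and radio drivers below. A minimal before/after sketch of the pattern, with hypothetical names:

	#include <linux/mutex.h>

	struct example_dev {
		struct mutex lock;		/* was: struct semaphore lock; */
	};

	static void example_setup(struct example_dev *dev)
	{
		mutex_init(&dev->lock);		/* was: init_MUTEX(&dev->lock); */
	}

	static int example_op(struct example_dev *dev)
	{
		/* was: if (down_interruptible(&dev->lock)) return -EINTR; */
		if (mutex_lock_interruptible(&dev->lock))
			return -EINTR;
		/* ... program the hardware while holding the lock ... */
		mutex_unlock(&dev->lock);	/* was: up(&dev->lock); */
		return 0;
	}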
diff --git a/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c b/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c
index 5a13c4744f61..248fdc7accfb 100644
--- a/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c
+++ b/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c
@@ -19,7 +19,7 @@
19#include <linux/time.h> 19#include <linux/time.h>
20#include <linux/errno.h> 20#include <linux/errno.h>
21#include <linux/jiffies.h> 21#include <linux/jiffies.h>
22#include <asm/semaphore.h> 22#include <linux/mutex.h>
23 23
24#include "dvb_frontend.h" 24#include "dvb_frontend.h"
25#include "dmxdev.h" 25#include "dmxdev.h"
@@ -35,7 +35,6 @@
35#include <linux/dvb/dmx.h> 35#include <linux/dvb/dmx.h>
36#include <linux/pci.h> 36#include <linux/pci.h>
37 37
38
39/* 38/*
40 TTUSB_HWSECTIONS: 39 TTUSB_HWSECTIONS:
41 the DSP supports filtering in hardware, however, since the "muxstream" 40 the DSP supports filtering in hardware, however, since the "muxstream"
@@ -83,8 +82,8 @@ struct ttusb {
83 struct dvb_net dvbnet; 82 struct dvb_net dvbnet;
84 83
85 /* and one for USB access. */ 84 /* and one for USB access. */
86 struct semaphore semi2c; 85 struct mutex semi2c;
87 struct semaphore semusb; 86 struct mutex semusb;
88 87
89 struct dvb_adapter adapter; 88 struct dvb_adapter adapter;
90 struct usb_device *dev; 89 struct usb_device *dev;
@@ -150,7 +149,7 @@ static int ttusb_cmd(struct ttusb *ttusb,
150 printk("\n"); 149 printk("\n");
151#endif 150#endif
152 151
153 if (down_interruptible(&ttusb->semusb) < 0) 152 if (mutex_lock_interruptible(&ttusb->semusb) < 0)
154 return -EAGAIN; 153 return -EAGAIN;
155 154
156 err = usb_bulk_msg(ttusb->dev, ttusb->bulk_out_pipe, 155 err = usb_bulk_msg(ttusb->dev, ttusb->bulk_out_pipe,
@@ -158,13 +157,13 @@ static int ttusb_cmd(struct ttusb *ttusb,
158 if (err != 0) { 157 if (err != 0) {
159 dprintk("%s: usb_bulk_msg(send) failed, err == %i!\n", 158 dprintk("%s: usb_bulk_msg(send) failed, err == %i!\n",
160 __FUNCTION__, err); 159 __FUNCTION__, err);
161 up(&ttusb->semusb); 160 mutex_unlock(&ttusb->semusb);
162 return err; 161 return err;
163 } 162 }
164 if (actual_len != len) { 163 if (actual_len != len) {
165 dprintk("%s: only wrote %d of %d bytes\n", __FUNCTION__, 164 dprintk("%s: only wrote %d of %d bytes\n", __FUNCTION__,
166 actual_len, len); 165 actual_len, len);
167 up(&ttusb->semusb); 166 mutex_unlock(&ttusb->semusb);
168 return -1; 167 return -1;
169 } 168 }
170 169
@@ -174,7 +173,7 @@ static int ttusb_cmd(struct ttusb *ttusb,
174 if (err != 0) { 173 if (err != 0) {
175 printk("%s: failed, receive error %d\n", __FUNCTION__, 174 printk("%s: failed, receive error %d\n", __FUNCTION__,
176 err); 175 err);
177 up(&ttusb->semusb); 176 mutex_unlock(&ttusb->semusb);
178 return err; 177 return err;
179 } 178 }
180#if DEBUG >= 3 179#if DEBUG >= 3
@@ -185,14 +184,14 @@ static int ttusb_cmd(struct ttusb *ttusb,
185 printk("\n"); 184 printk("\n");
186#endif 185#endif
187 if (!needresult) 186 if (!needresult)
188 up(&ttusb->semusb); 187 mutex_unlock(&ttusb->semusb);
189 return 0; 188 return 0;
190} 189}
191 190
192static int ttusb_result(struct ttusb *ttusb, u8 * data, int len) 191static int ttusb_result(struct ttusb *ttusb, u8 * data, int len)
193{ 192{
194 memcpy(data, ttusb->last_result, len); 193 memcpy(data, ttusb->last_result, len);
195 up(&ttusb->semusb); 194 mutex_unlock(&ttusb->semusb);
196 return 0; 195 return 0;
197} 196}
198 197
@@ -250,7 +249,7 @@ static int master_xfer(struct i2c_adapter* adapter, struct i2c_msg *msg, int num
250 int i = 0; 249 int i = 0;
251 int inc; 250 int inc;
252 251
253 if (down_interruptible(&ttusb->semi2c) < 0) 252 if (mutex_lock_interruptible(&ttusb->semi2c) < 0)
254 return -EAGAIN; 253 return -EAGAIN;
255 254
256 while (i < num) { 255 while (i < num) {
@@ -284,7 +283,7 @@ static int master_xfer(struct i2c_adapter* adapter, struct i2c_msg *msg, int num
284 i += inc; 283 i += inc;
285 } 284 }
286 285
287 up(&ttusb->semi2c); 286 mutex_unlock(&ttusb->semi2c);
288 return i; 287 return i;
289} 288}
290 289
@@ -689,8 +688,7 @@ static void ttusb_process_frame(struct ttusb *ttusb, u8 * data, int len)
689 memcpy(ttusb->muxpack + ttusb->muxpack_ptr, 688 memcpy(ttusb->muxpack + ttusb->muxpack_ptr,
690 data, avail); 689 data, avail);
691 ttusb->muxpack_ptr += avail; 690 ttusb->muxpack_ptr += avail;
692 if (ttusb->muxpack_ptr > 264) 691 BUG_ON(ttusb->muxpack_ptr > 264);
693 BUG();
694 data += avail; 692 data += avail;
695 len -= avail; 693 len -= avail;
696 /* determine length */ 694 /* determine length */
@@ -1495,8 +1493,11 @@ static int ttusb_probe(struct usb_interface *intf, const struct usb_device_id *i
1495 ttusb->dev = udev; 1493 ttusb->dev = udev;
1496 ttusb->c = 0; 1494 ttusb->c = 0;
1497 ttusb->mux_state = 0; 1495 ttusb->mux_state = 0;
1498 sema_init(&ttusb->semi2c, 0); 1496 mutex_init(&ttusb->semi2c);
1499 sema_init(&ttusb->semusb, 1); 1497
1498 mutex_lock(&ttusb->semi2c);
1499
1500 mutex_init(&ttusb->semusb);
1500 1501
1501 ttusb_setup_interfaces(ttusb); 1502 ttusb_setup_interfaces(ttusb);
1502 1503
@@ -1504,7 +1505,7 @@ static int ttusb_probe(struct usb_interface *intf, const struct usb_device_id *i
1504 if (ttusb_init_controller(ttusb)) 1505 if (ttusb_init_controller(ttusb))
1505 printk("ttusb_init_controller: error\n"); 1506 printk("ttusb_init_controller: error\n");
1506 1507
1507 up(&ttusb->semi2c); 1508 mutex_unlock(&ttusb->semi2c);
1508 1509
1509 dvb_register_adapter(&ttusb->adapter, "Technotrend/Hauppauge Nova-USB", THIS_MODULE); 1510 dvb_register_adapter(&ttusb->adapter, "Technotrend/Hauppauge Nova-USB", THIS_MODULE);
1510 ttusb->adapter.priv = ttusb; 1511 ttusb->adapter.priv = ttusb;
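One conversion in the probe hunk above is not purely mechanical: the old code created semi2c with sema_init(&ttusb->semi2c, 0), i.e. already held, so I2C transfers blocked until probe released it after ttusb_init_controller(); the mutex version reproduces this by taking the mutex right after mutex_init() and unlocking it at the same point. A minimal sketch of that idiom, with hypothetical names:

	#include <linux/mutex.h>

	static struct mutex setup_gate;

	static void example_probe(void)
	{
		mutex_init(&setup_gate);
		mutex_lock(&setup_gate);	/* gate closed: bus users must wait */

		/* ... controller / firmware bring-up happens here ... */

		mutex_unlock(&setup_gate);	/* gate open: normal operation starts */
	}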
diff --git a/drivers/media/dvb/ttusb-dec/ttusb_dec.c b/drivers/media/dvb/ttusb-dec/ttusb_dec.c
index df831171e03c..44dea3211848 100644
--- a/drivers/media/dvb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/dvb/ttusb-dec/ttusb_dec.c
@@ -20,7 +20,8 @@
20 * 20 *
21 */ 21 */
22 22
23#include <asm/semaphore.h> 23#include <linux/mutex.h>
24
24#include <linux/list.h> 25#include <linux/list.h>
25#include <linux/module.h> 26#include <linux/module.h>
26#include <linux/moduleparam.h> 27#include <linux/moduleparam.h>
@@ -115,7 +116,7 @@ struct ttusb_dec {
115 unsigned int out_pipe; 116 unsigned int out_pipe;
116 unsigned int irq_pipe; 117 unsigned int irq_pipe;
117 enum ttusb_dec_interface interface; 118 enum ttusb_dec_interface interface;
118 struct semaphore usb_sem; 119 struct mutex usb_mutex;
119 120
120 void *irq_buffer; 121 void *irq_buffer;
121 struct urb *irq_urb; 122 struct urb *irq_urb;
@@ -124,7 +125,7 @@ struct ttusb_dec {
124 dma_addr_t iso_dma_handle; 125 dma_addr_t iso_dma_handle;
125 struct urb *iso_urb[ISO_BUF_COUNT]; 126 struct urb *iso_urb[ISO_BUF_COUNT];
126 int iso_stream_count; 127 int iso_stream_count;
127 struct semaphore iso_sem; 128 struct mutex iso_mutex;
128 129
129 u8 packet[MAX_PVA_LENGTH + 4]; 130 u8 packet[MAX_PVA_LENGTH + 4];
130 enum ttusb_dec_packet_type packet_type; 131 enum ttusb_dec_packet_type packet_type;
@@ -273,9 +274,9 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
273 if (!b) 274 if (!b)
274 return -ENOMEM; 275 return -ENOMEM;
275 276
276 if ((result = down_interruptible(&dec->usb_sem))) { 277 if ((result = mutex_lock_interruptible(&dec->usb_mutex))) {
277 kfree(b); 278 kfree(b);
278 printk("%s: Failed to down usb semaphore.\n", __FUNCTION__); 279 printk("%s: Failed to lock usb mutex.\n", __FUNCTION__);
279 return result; 280 return result;
280 } 281 }
281 282
@@ -300,7 +301,7 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
300 if (result) { 301 if (result) {
301 printk("%s: command bulk message failed: error %d\n", 302 printk("%s: command bulk message failed: error %d\n",
302 __FUNCTION__, result); 303 __FUNCTION__, result);
303 up(&dec->usb_sem); 304 mutex_unlock(&dec->usb_mutex);
304 kfree(b); 305 kfree(b);
305 return result; 306 return result;
306 } 307 }
@@ -311,7 +312,7 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
311 if (result) { 312 if (result) {
312 printk("%s: result bulk message failed: error %d\n", 313 printk("%s: result bulk message failed: error %d\n",
313 __FUNCTION__, result); 314 __FUNCTION__, result);
314 up(&dec->usb_sem); 315 mutex_unlock(&dec->usb_mutex);
315 kfree(b); 316 kfree(b);
316 return result; 317 return result;
317 } else { 318 } else {
@@ -327,7 +328,7 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
327 if (cmd_result && b[3] > 0) 328 if (cmd_result && b[3] > 0)
328 memcpy(cmd_result, &b[4], b[3]); 329 memcpy(cmd_result, &b[4], b[3]);
329 330
330 up(&dec->usb_sem); 331 mutex_unlock(&dec->usb_mutex);
331 332
332 kfree(b); 333 kfree(b);
333 return 0; 334 return 0;
@@ -835,7 +836,7 @@ static void ttusb_dec_stop_iso_xfer(struct ttusb_dec *dec)
835 836
836 dprintk("%s\n", __FUNCTION__); 837 dprintk("%s\n", __FUNCTION__);
837 838
838 if (down_interruptible(&dec->iso_sem)) 839 if (mutex_lock_interruptible(&dec->iso_mutex))
839 return; 840 return;
840 841
841 dec->iso_stream_count--; 842 dec->iso_stream_count--;
@@ -845,7 +846,7 @@ static void ttusb_dec_stop_iso_xfer(struct ttusb_dec *dec)
845 usb_kill_urb(dec->iso_urb[i]); 846 usb_kill_urb(dec->iso_urb[i]);
846 } 847 }
847 848
848 up(&dec->iso_sem); 849 mutex_unlock(&dec->iso_mutex);
849} 850}
850 851
851/* Setting the interface of the DEC tends to take down the USB communications 852/* Setting the interface of the DEC tends to take down the USB communications
@@ -890,7 +891,7 @@ static int ttusb_dec_start_iso_xfer(struct ttusb_dec *dec)
890 891
891 dprintk("%s\n", __FUNCTION__); 892 dprintk("%s\n", __FUNCTION__);
892 893
893 if (down_interruptible(&dec->iso_sem)) 894 if (mutex_lock_interruptible(&dec->iso_mutex))
894 return -EAGAIN; 895 return -EAGAIN;
895 896
896 if (!dec->iso_stream_count) { 897 if (!dec->iso_stream_count) {
@@ -911,7 +912,7 @@ static int ttusb_dec_start_iso_xfer(struct ttusb_dec *dec)
911 i--; 912 i--;
912 } 913 }
913 914
914 up(&dec->iso_sem); 915 mutex_unlock(&dec->iso_mutex);
915 return result; 916 return result;
916 } 917 }
917 } 918 }
@@ -919,7 +920,7 @@ static int ttusb_dec_start_iso_xfer(struct ttusb_dec *dec)
919 920
920 dec->iso_stream_count++; 921 dec->iso_stream_count++;
921 922
922 up(&dec->iso_sem); 923 mutex_unlock(&dec->iso_mutex);
923 924
924 return 0; 925 return 0;
925} 926}
@@ -1229,8 +1230,8 @@ static int ttusb_dec_init_usb(struct ttusb_dec *dec)
1229{ 1230{
1230 dprintk("%s\n", __FUNCTION__); 1231 dprintk("%s\n", __FUNCTION__);
1231 1232
1232 sema_init(&dec->usb_sem, 1); 1233 mutex_init(&dec->usb_mutex);
1233 sema_init(&dec->iso_sem, 1); 1234 mutex_init(&dec->iso_mutex);
1234 1235
1235 dec->command_pipe = usb_sndbulkpipe(dec->udev, COMMAND_PIPE); 1236 dec->command_pipe = usb_sndbulkpipe(dec->udev, COMMAND_PIPE);
1236 dec->result_pipe = usb_rcvbulkpipe(dec->udev, RESULT_PIPE); 1237 dec->result_pipe = usb_rcvbulkpipe(dec->udev, RESULT_PIPE);
diff --git a/drivers/media/radio/miropcm20-rds-core.c b/drivers/media/radio/miropcm20-rds-core.c
index a917a90cb5dc..b602c73e2309 100644
--- a/drivers/media/radio/miropcm20-rds-core.c
+++ b/drivers/media/radio/miropcm20-rds-core.c
@@ -18,14 +18,15 @@
18#include <linux/string.h> 18#include <linux/string.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <asm/semaphore.h> 21#include <linux/mutex.h>
22
22#include <asm/io.h> 23#include <asm/io.h>
23#include "../../../sound/oss/aci.h" 24#include "../../../sound/oss/aci.h"
24#include "miropcm20-rds-core.h" 25#include "miropcm20-rds-core.h"
25 26
26#define DEBUG 0 27#define DEBUG 0
27 28
28static struct semaphore aci_rds_sem; 29static struct mutex aci_rds_mutex;
29 30
30#define RDS_DATASHIFT 2 /* Bit 2 */ 31#define RDS_DATASHIFT 2 /* Bit 2 */
31#define RDS_DATAMASK (1 << RDS_DATASHIFT) 32#define RDS_DATAMASK (1 << RDS_DATASHIFT)
@@ -181,7 +182,7 @@ int aci_rds_cmd(unsigned char cmd, unsigned char databuffer[], int datasize)
181{ 182{
182 int ret; 183 int ret;
183 184
184 if (down_interruptible(&aci_rds_sem)) 185 if (mutex_lock_interruptible(&aci_rds_mutex))
185 return -EINTR; 186 return -EINTR;
186 187
187 rds_write(cmd); 188 rds_write(cmd);
@@ -192,7 +193,7 @@ int aci_rds_cmd(unsigned char cmd, unsigned char databuffer[], int datasize)
192 else 193 else
193 ret = 0; 194 ret = 0;
194 195
195 up(&aci_rds_sem); 196 mutex_unlock(&aci_rds_mutex);
196 197
197 return ret; 198 return ret;
198} 199}
@@ -200,7 +201,7 @@ EXPORT_SYMBOL(aci_rds_cmd);
200 201
201int __init attach_aci_rds(void) 202int __init attach_aci_rds(void)
202{ 203{
203 init_MUTEX(&aci_rds_sem); 204 mutex_init(&aci_rds_mutex);
204 return 0; 205 return 0;
205} 206}
206 207
diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c
index 914deab4e044..557fb5c4af38 100644
--- a/drivers/media/radio/radio-aimslab.c
+++ b/drivers/media/radio/radio-aimslab.c
@@ -43,7 +43,7 @@
43 43
44static int io = CONFIG_RADIO_RTRACK_PORT; 44static int io = CONFIG_RADIO_RTRACK_PORT;
45static int radio_nr = -1; 45static int radio_nr = -1;
46static struct semaphore lock; 46static struct mutex lock;
47 47
48struct rt_device 48struct rt_device
49{ 49{
@@ -83,23 +83,23 @@ static void rt_incvol(void)
83static void rt_mute(struct rt_device *dev) 83static void rt_mute(struct rt_device *dev)
84{ 84{
85 dev->muted = 1; 85 dev->muted = 1;
86 down(&lock); 86 mutex_lock(&lock);
87 outb(0xd0, io); /* volume steady, off */ 87 outb(0xd0, io); /* volume steady, off */
88 up(&lock); 88 mutex_unlock(&lock);
89} 89}
90 90
91static int rt_setvol(struct rt_device *dev, int vol) 91static int rt_setvol(struct rt_device *dev, int vol)
92{ 92{
93 int i; 93 int i;
94 94
95 down(&lock); 95 mutex_lock(&lock);
96 96
97 if(vol == dev->curvol) { /* requested volume = current */ 97 if(vol == dev->curvol) { /* requested volume = current */
98 if (dev->muted) { /* user is unmuting the card */ 98 if (dev->muted) { /* user is unmuting the card */
99 dev->muted = 0; 99 dev->muted = 0;
100 outb (0xd8, io); /* enable card */ 100 outb (0xd8, io); /* enable card */
101 } 101 }
102 up(&lock); 102 mutex_unlock(&lock);
103 return 0; 103 return 0;
104 } 104 }
105 105
@@ -108,7 +108,7 @@ static int rt_setvol(struct rt_device *dev, int vol)
108 sleep_delay(2000000); /* make sure it's totally down */ 108 sleep_delay(2000000); /* make sure it's totally down */
109 outb(0xd0, io); /* volume steady, off */ 109 outb(0xd0, io); /* volume steady, off */
110 dev->curvol = 0; /* track the volume state! */ 110 dev->curvol = 0; /* track the volume state! */
111 up(&lock); 111 mutex_unlock(&lock);
112 return 0; 112 return 0;
113 } 113 }
114 114
@@ -121,7 +121,7 @@ static int rt_setvol(struct rt_device *dev, int vol)
121 rt_decvol(); 121 rt_decvol();
122 122
123 dev->curvol = vol; 123 dev->curvol = vol;
124 up(&lock); 124 mutex_unlock(&lock);
125 return 0; 125 return 0;
126} 126}
127 127
@@ -168,7 +168,7 @@ static int rt_setfreq(struct rt_device *dev, unsigned long freq)
168 freq += 171200; /* Add 10.7 MHz IF */ 168 freq += 171200; /* Add 10.7 MHz IF */
169 freq /= 800; /* Convert to 50 kHz units */ 169 freq /= 800; /* Convert to 50 kHz units */
170 170
171 down(&lock); /* Stop other ops interfering */ 171 mutex_lock(&lock); /* Stop other ops interfering */
172 172
173 send_0_byte (io, dev); /* 0: LSB of frequency */ 173 send_0_byte (io, dev); /* 0: LSB of frequency */
174 174
@@ -196,7 +196,7 @@ static int rt_setfreq(struct rt_device *dev, unsigned long freq)
196 else 196 else
197 outb (0xd8, io); /* volume steady + sigstr + on */ 197 outb (0xd8, io); /* volume steady + sigstr + on */
198 198
199 up(&lock); 199 mutex_unlock(&lock);
200 200
201 return 0; 201 return 0;
202} 202}
@@ -337,7 +337,7 @@ static int __init rtrack_init(void)
337 337
338 /* Set up the I/O locking */ 338 /* Set up the I/O locking */
339 339
340 init_MUTEX(&lock); 340 mutex_init(&lock);
341 341
342 /* mute card - prevents noisy bootups */ 342 /* mute card - prevents noisy bootups */
343 343
diff --git a/drivers/media/radio/radio-aztech.c b/drivers/media/radio/radio-aztech.c
index 523be820f9c6..83bdae23417d 100644
--- a/drivers/media/radio/radio-aztech.c
+++ b/drivers/media/radio/radio-aztech.c
@@ -42,7 +42,7 @@
42static int io = CONFIG_RADIO_AZTECH_PORT; 42static int io = CONFIG_RADIO_AZTECH_PORT;
43static int radio_nr = -1; 43static int radio_nr = -1;
44static int radio_wait_time = 1000; 44static int radio_wait_time = 1000;
45static struct semaphore lock; 45static struct mutex lock;
46 46
47struct az_device 47struct az_device
48{ 48{
@@ -87,9 +87,9 @@ static void send_1_byte (struct az_device *dev)
87 87
88static int az_setvol(struct az_device *dev, int vol) 88static int az_setvol(struct az_device *dev, int vol)
89{ 89{
90 down(&lock); 90 mutex_lock(&lock);
91 outb (volconvert(vol), io); 91 outb (volconvert(vol), io);
92 up(&lock); 92 mutex_unlock(&lock);
93 return 0; 93 return 0;
94} 94}
95 95
@@ -122,7 +122,7 @@ static int az_setfreq(struct az_device *dev, unsigned long frequency)
122 frequency += 171200; /* Add 10.7 MHz IF */ 122 frequency += 171200; /* Add 10.7 MHz IF */
123 frequency /= 800; /* Convert to 50 kHz units */ 123 frequency /= 800; /* Convert to 50 kHz units */
124 124
125 down(&lock); 125 mutex_lock(&lock);
126 126
127 send_0_byte (dev); /* 0: LSB of frequency */ 127 send_0_byte (dev); /* 0: LSB of frequency */
128 128
@@ -152,7 +152,7 @@ static int az_setfreq(struct az_device *dev, unsigned long frequency)
152 udelay (radio_wait_time); 152 udelay (radio_wait_time);
153 outb_p(128+64+volconvert(dev->curvol), io); 153 outb_p(128+64+volconvert(dev->curvol), io);
154 154
155 up(&lock); 155 mutex_unlock(&lock);
156 156
157 return 0; 157 return 0;
158} 158}
@@ -283,7 +283,7 @@ static int __init aztech_init(void)
283 return -EBUSY; 283 return -EBUSY;
284 } 284 }
285 285
286 init_MUTEX(&lock); 286 mutex_init(&lock);
287 aztech_radio.priv=&aztech_unit; 287 aztech_radio.priv=&aztech_unit;
288 288
289 if(video_register_device(&aztech_radio, VFL_TYPE_RADIO, radio_nr)==-1) 289 if(video_register_device(&aztech_radio, VFL_TYPE_RADIO, radio_nr)==-1)
diff --git a/drivers/media/radio/radio-maestro.c b/drivers/media/radio/radio-maestro.c
index 36c9f5bf8cdd..39c1d9118636 100644
--- a/drivers/media/radio/radio-maestro.c
+++ b/drivers/media/radio/radio-maestro.c
@@ -23,10 +23,11 @@
23#include <linux/sched.h> 23#include <linux/sched.h>
24#include <asm/io.h> 24#include <asm/io.h>
25#include <asm/uaccess.h> 25#include <asm/uaccess.h>
26#include <asm/semaphore.h> 26#include <linux/mutex.h>
27#include <linux/pci.h> 27#include <linux/pci.h>
28#include <linux/videodev.h> 28#include <linux/videodev.h>
29 29
30
30#define DRIVER_VERSION "0.05" 31#define DRIVER_VERSION "0.05"
31 32
32#define GPIO_DATA 0x60 /* port offset from ESS_IO_BASE */ 33#define GPIO_DATA 0x60 /* port offset from ESS_IO_BASE */
@@ -104,7 +105,7 @@ struct radio_device {
104 muted, /* VIDEO_AUDIO_MUTE */ 105 muted, /* VIDEO_AUDIO_MUTE */
105 stereo, /* VIDEO_TUNER_STEREO_ON */ 106 stereo, /* VIDEO_TUNER_STEREO_ON */
106 tuned; /* signal strength (0 or 0xffff) */ 107 tuned; /* signal strength (0 or 0xffff) */
107 struct semaphore lock; 108 struct mutex lock;
108}; 109};
109 110
110static u32 radio_bits_get(struct radio_device *dev) 111static u32 radio_bits_get(struct radio_device *dev)
@@ -258,9 +259,9 @@ static int radio_ioctl(struct inode *inode, struct file *file,
258 struct radio_device *card = video_get_drvdata(dev); 259 struct radio_device *card = video_get_drvdata(dev);
259 int ret; 260 int ret;
260 261
261 down(&card->lock); 262 mutex_lock(&card->lock);
262 ret = video_usercopy(inode, file, cmd, arg, radio_function); 263 ret = video_usercopy(inode, file, cmd, arg, radio_function);
263 up(&card->lock); 264 mutex_unlock(&card->lock);
264 265
265 return ret; 266 return ret;
266} 267}
@@ -311,7 +312,7 @@ static int __devinit maestro_probe(struct pci_dev *pdev,
311 } 312 }
312 313
313 radio_unit->io = pci_resource_start(pdev, 0) + GPIO_DATA; 314 radio_unit->io = pci_resource_start(pdev, 0) + GPIO_DATA;
314 init_MUTEX(&radio_unit->lock); 315 mutex_init(&radio_unit->lock);
315 316
316 maestro_radio_inst = video_device_alloc(); 317 maestro_radio_inst = video_device_alloc();
317 if (maestro_radio_inst == NULL) { 318 if (maestro_radio_inst == NULL) {
diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
index c975ddd86cd5..f0bf47bcb64c 100644
--- a/drivers/media/radio/radio-maxiradio.c
+++ b/drivers/media/radio/radio-maxiradio.c
@@ -37,7 +37,8 @@
37#include <linux/sched.h> 37#include <linux/sched.h>
38#include <asm/io.h> 38#include <asm/io.h>
39#include <asm/uaccess.h> 39#include <asm/uaccess.h>
40#include <asm/semaphore.h> 40#include <linux/mutex.h>
41
41#include <linux/pci.h> 42#include <linux/pci.h>
42#include <linux/videodev.h> 43#include <linux/videodev.h>
43 44
@@ -101,7 +102,7 @@ static struct radio_device
101 102
102 unsigned long freq; 103 unsigned long freq;
103 104
104 struct semaphore lock; 105 struct mutex lock;
105} radio_unit = {0, 0, 0, 0, }; 106} radio_unit = {0, 0, 0, 0, };
106 107
107 108
@@ -267,9 +268,9 @@ static int radio_ioctl(struct inode *inode, struct file *file,
267 struct radio_device *card=dev->priv; 268 struct radio_device *card=dev->priv;
268 int ret; 269 int ret;
269 270
270 down(&card->lock); 271 mutex_lock(&card->lock);
271 ret = video_usercopy(inode, file, cmd, arg, radio_function); 272 ret = video_usercopy(inode, file, cmd, arg, radio_function);
272 up(&card->lock); 273 mutex_unlock(&card->lock);
273 return ret; 274 return ret;
274} 275}
275 276
@@ -290,7 +291,7 @@ static int __devinit maxiradio_init_one(struct pci_dev *pdev, const struct pci_d
290 goto err_out_free_region; 291 goto err_out_free_region;
291 292
292 radio_unit.io = pci_resource_start(pdev, 0); 293 radio_unit.io = pci_resource_start(pdev, 0);
293 init_MUTEX(&radio_unit.lock); 294 mutex_init(&radio_unit.lock);
294 maxiradio_radio.priv = &radio_unit; 295 maxiradio_radio.priv = &radio_unit;
295 296
296 if(video_register_device(&maxiradio_radio, VFL_TYPE_RADIO, radio_nr)==-1) { 297 if(video_register_device(&maxiradio_radio, VFL_TYPE_RADIO, radio_nr)==-1) {
diff --git a/drivers/media/radio/radio-sf16fmi.c b/drivers/media/radio/radio-sf16fmi.c
index 0229f792a059..53073b424107 100644
--- a/drivers/media/radio/radio-sf16fmi.c
+++ b/drivers/media/radio/radio-sf16fmi.c
@@ -24,7 +24,7 @@
24#include <linux/isapnp.h> 24#include <linux/isapnp.h>
25#include <asm/io.h> /* outb, outb_p */ 25#include <asm/io.h> /* outb, outb_p */
26#include <asm/uaccess.h> /* copy to/from user */ 26#include <asm/uaccess.h> /* copy to/from user */
27#include <asm/semaphore.h> 27#include <linux/mutex.h>
28 28
29struct fmi_device 29struct fmi_device
30{ 30{
@@ -37,7 +37,7 @@ struct fmi_device
37static int io = -1; 37static int io = -1;
38static int radio_nr = -1; 38static int radio_nr = -1;
39static struct pnp_dev *dev = NULL; 39static struct pnp_dev *dev = NULL;
40static struct semaphore lock; 40static struct mutex lock;
41 41
42/* freq is in 1/16 kHz to internal number, hw precision is 50 kHz */ 42/* freq is in 1/16 kHz to internal number, hw precision is 50 kHz */
43/* It is only useful to give freq in intervall of 800 (=0.05Mhz), 43/* It is only useful to give freq in intervall of 800 (=0.05Mhz),
@@ -68,16 +68,16 @@ static void outbits(int bits, unsigned int data, int port)
68 68
69static inline void fmi_mute(int port) 69static inline void fmi_mute(int port)
70{ 70{
71 down(&lock); 71 mutex_lock(&lock);
72 outb(0x00, port); 72 outb(0x00, port);
73 up(&lock); 73 mutex_unlock(&lock);
74} 74}
75 75
76static inline void fmi_unmute(int port) 76static inline void fmi_unmute(int port)
77{ 77{
78 down(&lock); 78 mutex_lock(&lock);
79 outb(0x08, port); 79 outb(0x08, port);
80 up(&lock); 80 mutex_unlock(&lock);
81} 81}
82 82
83static inline int fmi_setfreq(struct fmi_device *dev) 83static inline int fmi_setfreq(struct fmi_device *dev)
@@ -85,12 +85,12 @@ static inline int fmi_setfreq(struct fmi_device *dev)
85 int myport = dev->port; 85 int myport = dev->port;
86 unsigned long freq = dev->curfreq; 86 unsigned long freq = dev->curfreq;
87 87
88 down(&lock); 88 mutex_lock(&lock);
89 89
90 outbits(16, RSF16_ENCODE(freq), myport); 90 outbits(16, RSF16_ENCODE(freq), myport);
91 outbits(8, 0xC0, myport); 91 outbits(8, 0xC0, myport);
92 msleep(143); /* was schedule_timeout(HZ/7) */ 92 msleep(143); /* was schedule_timeout(HZ/7) */
93 up(&lock); 93 mutex_unlock(&lock);
94 if (dev->curvol) fmi_unmute(myport); 94 if (dev->curvol) fmi_unmute(myport);
95 return 0; 95 return 0;
96} 96}
@@ -102,7 +102,7 @@ static inline int fmi_getsigstr(struct fmi_device *dev)
102 int myport = dev->port; 102 int myport = dev->port;
103 103
104 104
105 down(&lock); 105 mutex_lock(&lock);
106 val = dev->curvol ? 0x08 : 0x00; /* unmute/mute */ 106 val = dev->curvol ? 0x08 : 0x00; /* unmute/mute */
107 outb(val, myport); 107 outb(val, myport);
108 outb(val | 0x10, myport); 108 outb(val | 0x10, myport);
@@ -110,7 +110,7 @@ static inline int fmi_getsigstr(struct fmi_device *dev)
110 res = (int)inb(myport+1); 110 res = (int)inb(myport+1);
111 outb(val, myport); 111 outb(val, myport);
112 112
113 up(&lock); 113 mutex_unlock(&lock);
114 return (res & 2) ? 0 : 0xFFFF; 114 return (res & 2) ? 0 : 0xFFFF;
115} 115}
116 116
@@ -296,7 +296,7 @@ static int __init fmi_init(void)
296 fmi_unit.flags = VIDEO_TUNER_LOW; 296 fmi_unit.flags = VIDEO_TUNER_LOW;
297 fmi_radio.priv = &fmi_unit; 297 fmi_radio.priv = &fmi_unit;
298 298
299 init_MUTEX(&lock); 299 mutex_init(&lock);
300 300
301 if (video_register_device(&fmi_radio, VFL_TYPE_RADIO, radio_nr) == -1) { 301 if (video_register_device(&fmi_radio, VFL_TYPE_RADIO, radio_nr) == -1) {
302 release_region(io, 2); 302 release_region(io, 2);
diff --git a/drivers/media/radio/radio-sf16fmr2.c b/drivers/media/radio/radio-sf16fmr2.c
index 099ffb3b9c71..bcebd8cb19ad 100644
--- a/drivers/media/radio/radio-sf16fmr2.c
+++ b/drivers/media/radio/radio-sf16fmr2.c
@@ -19,9 +19,9 @@
19#include <asm/io.h> /* outb, outb_p */ 19#include <asm/io.h> /* outb, outb_p */
20#include <asm/uaccess.h> /* copy to/from user */ 20#include <asm/uaccess.h> /* copy to/from user */
21#include <linux/videodev.h> /* kernel radio structs */ 21#include <linux/videodev.h> /* kernel radio structs */
22#include <asm/semaphore.h> 22#include <linux/mutex.h>
23 23
24static struct semaphore lock; 24static struct mutex lock;
25 25
26#undef DEBUG 26#undef DEBUG
27//#define DEBUG 1 27//#define DEBUG 1
@@ -238,9 +238,9 @@ static int fmr2_do_ioctl(struct inode *inode, struct file *file,
238 if (fmr2->mute) 238 if (fmr2->mute)
239 v->flags |= VIDEO_AUDIO_MUTE; 239 v->flags |= VIDEO_AUDIO_MUTE;
240 v->mode=VIDEO_MODE_AUTO; 240 v->mode=VIDEO_MODE_AUTO;
241 down(&lock); 241 mutex_lock(&lock);
242 v->signal = fmr2_getsigstr(fmr2); 242 v->signal = fmr2_getsigstr(fmr2);
243 up(&lock); 243 mutex_unlock(&lock);
244 return 0; 244 return 0;
245 } 245 }
246 case VIDIOCSTUNER: 246 case VIDIOCSTUNER:
@@ -274,9 +274,9 @@ static int fmr2_do_ioctl(struct inode *inode, struct file *file,
274 /* set card freq (if not muted) */ 274 /* set card freq (if not muted) */
275 if (fmr2->curvol && !fmr2->mute) 275 if (fmr2->curvol && !fmr2->mute)
276 { 276 {
277 down(&lock); 277 mutex_lock(&lock);
278 fmr2_setfreq(fmr2); 278 fmr2_setfreq(fmr2);
279 up(&lock); 279 mutex_unlock(&lock);
280 } 280 }
281 return 0; 281 return 0;
282 } 282 }
@@ -318,14 +318,14 @@ static int fmr2_do_ioctl(struct inode *inode, struct file *file,
318 else 318 else
319 printk(KERN_DEBUG "mute\n"); 319 printk(KERN_DEBUG "mute\n");
320#endif 320#endif
321 down(&lock); 321 mutex_lock(&lock);
322 if (fmr2->curvol && !fmr2->mute) 322 if (fmr2->curvol && !fmr2->mute)
323 { 323 {
324 fmr2_setvolume(fmr2); 324 fmr2_setvolume(fmr2);
325 fmr2_setfreq(fmr2); 325 fmr2_setfreq(fmr2);
326 } 326 }
327 else fmr2_mute(fmr2->port); 327 else fmr2_mute(fmr2->port);
328 up(&lock); 328 mutex_unlock(&lock);
329 return 0; 329 return 0;
330 } 330 }
331 case VIDIOCGUNIT: 331 case VIDIOCGUNIT:
@@ -380,7 +380,7 @@ static int __init fmr2_init(void)
380 fmr2_unit.card_type = 0; 380 fmr2_unit.card_type = 0;
381 fmr2_radio.priv = &fmr2_unit; 381 fmr2_radio.priv = &fmr2_unit;
382 382
383 init_MUTEX(&lock); 383 mutex_init(&lock);
384 384
385 if (request_region(io, 2, "sf16fmr2")) 385 if (request_region(io, 2, "sf16fmr2"))
386 { 386 {
@@ -397,10 +397,10 @@ static int __init fmr2_init(void)
397 printk(KERN_INFO "SF16FMR2 radio card driver at 0x%x.\n", io); 397 printk(KERN_INFO "SF16FMR2 radio card driver at 0x%x.\n", io);
398 debug_print((KERN_DEBUG "Mute %d Low %d\n",VIDEO_AUDIO_MUTE,VIDEO_TUNER_LOW)); 398 debug_print((KERN_DEBUG "Mute %d Low %d\n",VIDEO_AUDIO_MUTE,VIDEO_TUNER_LOW));
399 /* mute card - prevents noisy bootups */ 399 /* mute card - prevents noisy bootups */
400 down(&lock); 400 mutex_lock(&lock);
401 fmr2_mute(io); 401 fmr2_mute(io);
402 fmr2_product_info(&fmr2_unit); 402 fmr2_product_info(&fmr2_unit);
403 up(&lock); 403 mutex_unlock(&lock);
404 debug_print((KERN_DEBUG "card_type %d\n", fmr2_unit.card_type)); 404 debug_print((KERN_DEBUG "card_type %d\n", fmr2_unit.card_type));
405 return 0; 405 return 0;
406} 406}
diff --git a/drivers/media/radio/radio-typhoon.c b/drivers/media/radio/radio-typhoon.c
index 8ac9a8ef9094..e50955836d6b 100644
--- a/drivers/media/radio/radio-typhoon.c
+++ b/drivers/media/radio/radio-typhoon.c
@@ -59,7 +59,7 @@ struct typhoon_device {
59 int muted; 59 int muted;
60 unsigned long curfreq; 60 unsigned long curfreq;
61 unsigned long mutefreq; 61 unsigned long mutefreq;
62 struct semaphore lock; 62 struct mutex lock;
63}; 63};
64 64
65static void typhoon_setvol_generic(struct typhoon_device *dev, int vol); 65static void typhoon_setvol_generic(struct typhoon_device *dev, int vol);
@@ -77,12 +77,12 @@ static int typhoon_get_info(char *buf, char **start, off_t offset, int len);
77 77
78static void typhoon_setvol_generic(struct typhoon_device *dev, int vol) 78static void typhoon_setvol_generic(struct typhoon_device *dev, int vol)
79{ 79{
80 down(&dev->lock); 80 mutex_lock(&dev->lock);
81 vol >>= 14; /* Map 16 bit to 2 bit */ 81 vol >>= 14; /* Map 16 bit to 2 bit */
82 vol &= 3; 82 vol &= 3;
83 outb_p(vol / 2, dev->iobase); /* Set the volume, high bit. */ 83 outb_p(vol / 2, dev->iobase); /* Set the volume, high bit. */
84 outb_p(vol % 2, dev->iobase + 2); /* Set the volume, low bit. */ 84 outb_p(vol % 2, dev->iobase + 2); /* Set the volume, low bit. */
85 up(&dev->lock); 85 mutex_unlock(&dev->lock);
86} 86}
87 87
88static int typhoon_setfreq_generic(struct typhoon_device *dev, 88static int typhoon_setfreq_generic(struct typhoon_device *dev,
@@ -102,7 +102,7 @@ static int typhoon_setfreq_generic(struct typhoon_device *dev,
102 * 102 *
103 */ 103 */
104 104
105 down(&dev->lock); 105 mutex_lock(&dev->lock);
106 x = frequency / 160; 106 x = frequency / 160;
107 outval = (x * x + 2500) / 5000; 107 outval = (x * x + 2500) / 5000;
108 outval = (outval * x + 5000) / 10000; 108 outval = (outval * x + 5000) / 10000;
@@ -112,7 +112,7 @@ static int typhoon_setfreq_generic(struct typhoon_device *dev,
112 outb_p((outval >> 8) & 0x01, dev->iobase + 4); 112 outb_p((outval >> 8) & 0x01, dev->iobase + 4);
113 outb_p(outval >> 9, dev->iobase + 6); 113 outb_p(outval >> 9, dev->iobase + 6);
114 outb_p(outval & 0xff, dev->iobase + 8); 114 outb_p(outval & 0xff, dev->iobase + 8);
115 up(&dev->lock); 115 mutex_unlock(&dev->lock);
116 116
117 return 0; 117 return 0;
118} 118}
@@ -337,7 +337,7 @@ static int __init typhoon_init(void)
337#endif /* MODULE */ 337#endif /* MODULE */
338 338
339 printk(KERN_INFO BANNER); 339 printk(KERN_INFO BANNER);
340 init_MUTEX(&typhoon_unit.lock); 340 mutex_init(&typhoon_unit.lock);
341 io = typhoon_unit.iobase; 341 io = typhoon_unit.iobase;
342 if (!request_region(io, 8, "typhoon")) { 342 if (!request_region(io, 8, "typhoon")) {
343 printk(KERN_ERR "radio-typhoon: port 0x%x already in use\n", 343 printk(KERN_ERR "radio-typhoon: port 0x%x already in use\n",
diff --git a/drivers/media/radio/radio-zoltrix.c b/drivers/media/radio/radio-zoltrix.c
index d590e80c922e..7bf1a4264891 100644
--- a/drivers/media/radio/radio-zoltrix.c
+++ b/drivers/media/radio/radio-zoltrix.c
@@ -48,7 +48,7 @@ struct zol_device {
48 unsigned long curfreq; 48 unsigned long curfreq;
49 int muted; 49 int muted;
50 unsigned int stereo; 50 unsigned int stereo;
51 struct semaphore lock; 51 struct mutex lock;
52}; 52};
53 53
54static int zol_setvol(struct zol_device *dev, int vol) 54static int zol_setvol(struct zol_device *dev, int vol)
@@ -57,30 +57,30 @@ static int zol_setvol(struct zol_device *dev, int vol)
57 if (dev->muted) 57 if (dev->muted)
58 return 0; 58 return 0;
59 59
60 down(&dev->lock); 60 mutex_lock(&dev->lock);
61 if (vol == 0) { 61 if (vol == 0) {
62 outb(0, io); 62 outb(0, io);
63 outb(0, io); 63 outb(0, io);
64 inb(io + 3); /* Zoltrix needs to be read to confirm */ 64 inb(io + 3); /* Zoltrix needs to be read to confirm */
65 up(&dev->lock); 65 mutex_unlock(&dev->lock);
66 return 0; 66 return 0;
67 } 67 }
68 68
69 outb(dev->curvol-1, io); 69 outb(dev->curvol-1, io);
70 msleep(10); 70 msleep(10);
71 inb(io + 2); 71 inb(io + 2);
72 up(&dev->lock); 72 mutex_unlock(&dev->lock);
73 return 0; 73 return 0;
74} 74}
75 75
76static void zol_mute(struct zol_device *dev) 76static void zol_mute(struct zol_device *dev)
77{ 77{
78 dev->muted = 1; 78 dev->muted = 1;
79 down(&dev->lock); 79 mutex_lock(&dev->lock);
80 outb(0, io); 80 outb(0, io);
81 outb(0, io); 81 outb(0, io);
82 inb(io + 3); /* Zoltrix needs to be read to confirm */ 82 inb(io + 3); /* Zoltrix needs to be read to confirm */
83 up(&dev->lock); 83 mutex_unlock(&dev->lock);
84} 84}
85 85
86static void zol_unmute(struct zol_device *dev) 86static void zol_unmute(struct zol_device *dev)
@@ -104,7 +104,7 @@ static int zol_setfreq(struct zol_device *dev, unsigned long freq)
104 bitmask = 0xc480402c10080000ull; 104 bitmask = 0xc480402c10080000ull;
105 i = 45; 105 i = 45;
106 106
107 down(&dev->lock); 107 mutex_lock(&dev->lock);
108 108
109 outb(0, io); 109 outb(0, io);
110 outb(0, io); 110 outb(0, io);
@@ -149,7 +149,7 @@ static int zol_setfreq(struct zol_device *dev, unsigned long freq)
149 udelay(1000); 149 udelay(1000);
150 } 150 }
151 151
152 up(&dev->lock); 152 mutex_unlock(&dev->lock);
153 153
154 if(!dev->muted) 154 if(!dev->muted)
155 { 155 {
@@ -164,7 +164,7 @@ static int zol_getsigstr(struct zol_device *dev)
164{ 164{
165 int a, b; 165 int a, b;
166 166
167 down(&dev->lock); 167 mutex_lock(&dev->lock);
168 outb(0x00, io); /* This stuff I found to do nothing */ 168 outb(0x00, io); /* This stuff I found to do nothing */
169 outb(dev->curvol, io); 169 outb(dev->curvol, io);
170 msleep(20); 170 msleep(20);
@@ -173,7 +173,7 @@ static int zol_getsigstr(struct zol_device *dev)
173 msleep(10); 173 msleep(10);
174 b = inb(io); 174 b = inb(io);
175 175
176 up(&dev->lock); 176 mutex_unlock(&dev->lock);
177 177
178 if (a != b) 178 if (a != b)
179 return (0); 179 return (0);
@@ -188,7 +188,7 @@ static int zol_is_stereo (struct zol_device *dev)
188{ 188{
189 int x1, x2; 189 int x1, x2;
190 190
191 down(&dev->lock); 191 mutex_lock(&dev->lock);
192 192
193 outb(0x00, io); 193 outb(0x00, io);
194 outb(dev->curvol, io); 194 outb(dev->curvol, io);
@@ -198,7 +198,7 @@ static int zol_is_stereo (struct zol_device *dev)
198 msleep(10); 198 msleep(10);
199 x2 = inb(io); 199 x2 = inb(io);
200 200
201 up(&dev->lock); 201 mutex_unlock(&dev->lock);
202 202
203 if ((x1 == x2) && (x1 == 0xcf)) 203 if ((x1 == x2) && (x1 == 0xcf))
204 return 1; 204 return 1;
@@ -350,7 +350,7 @@ static int __init zoltrix_init(void)
350 } 350 }
351 printk(KERN_INFO "Zoltrix Radio Plus card driver.\n"); 351 printk(KERN_INFO "Zoltrix Radio Plus card driver.\n");
352 352
353 init_MUTEX(&zoltrix_unit.lock); 353 mutex_init(&zoltrix_unit.lock);
354 354
355 /* mute card - prevents noisy bootups */ 355 /* mute card - prevents noisy bootups */
356 356
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index d82c8a30ba44..c622a4da5663 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -26,6 +26,7 @@ config VIDEO_BT848
26 select VIDEO_IR 26 select VIDEO_IR
27 select VIDEO_TUNER 27 select VIDEO_TUNER
28 select VIDEO_TVEEPROM 28 select VIDEO_TVEEPROM
29 select VIDEO_MSP3400
29 ---help--- 30 ---help---
30 Support for BT848 based frame grabber/overlay boards. This includes 31 Support for BT848 based frame grabber/overlay boards. This includes
31 the Miro, Hauppauge and STB boards. Please read the material in 32 the Miro, Hauppauge and STB boards. Please read the material in
@@ -142,6 +143,8 @@ config VIDEO_CPIA_USB
142 otherwise say N. This will not work with the Creative Webcam III. 143 otherwise say N. This will not work with the Creative Webcam III.
143 It is also available as a module (cpia_usb). 144 It is also available as a module (cpia_usb).
144 145
146source "drivers/media/video/cpia2/Kconfig"
147
145config VIDEO_SAA5246A 148config VIDEO_SAA5246A
146 tristate "SAA5246A, SAA5281 Teletext processor" 149 tristate "SAA5246A, SAA5281 Teletext processor"
147 depends on VIDEO_DEV && I2C 150 depends on VIDEO_DEV && I2C
@@ -339,18 +342,53 @@ config VIDEO_M32R_AR_M64278
339 Say Y here to use the Renesas M64278E-800 camera module, 342 Say Y here to use the Renesas M64278E-800 camera module,
340 which supports VGA(640x480 pixcels) size of images. 343 which supports VGA(640x480 pixcels) size of images.
341 344
342config VIDEO_AUDIO_DECODER 345config VIDEO_MSP3400
343 tristate "Add support for additional audio chipsets" 346 tristate "Micronas MSP34xx audio decoders"
347 depends on VIDEO_DEV && I2C
348 ---help---
349 Support for the Micronas MSP34xx series of audio decoders.
350
351 To compile this driver as a module, choose M here: the
352 module will be called msp3400
353
354config VIDEO_CS53L32A
355 tristate "Cirrus Logic CS53L32A audio ADC"
344 depends on VIDEO_DEV && I2C && EXPERIMENTAL 356 depends on VIDEO_DEV && I2C && EXPERIMENTAL
345 ---help--- 357 ---help---
346 Say Y here to compile drivers for WM8775 and CS53L32A audio 358 Support for the Cirrus Logic CS53L32A low voltage
347 decoders. 359 stereo A/D converter.
348 360
349config VIDEO_DECODER 361 To compile this driver as a module, choose M here: the
350 tristate "Add support for additional video chipsets" 362 module will be called cs53l32a
363
364config VIDEO_WM8775
365 tristate "Wolfson Microelectronics WM8775 audio ADC"
351 depends on VIDEO_DEV && I2C && EXPERIMENTAL 366 depends on VIDEO_DEV && I2C && EXPERIMENTAL
352 ---help--- 367 ---help---
353 Say Y here to compile drivers for SAA7115, SAA7127 and CX25840 368 Support for the Wolfson Microelectronics WM8775
354 video decoders. 369 high performance stereo A/D Converter.
370
371 To compile this driver as a module, choose M here: the
372 module will be called wm8775
373
374source "drivers/media/video/cx25840/Kconfig"
375
376config VIDEO_SAA711X
377 tristate "Philips SAA7113/4/5 video decoders"
378 depends on VIDEO_DEV && I2C && EXPERIMENTAL
379 ---help---
380 Support for the Philips SAA7113/4/5 video decoders.
381
382 To compile this driver as a module, choose M here: the
383 module will be called saa7115
384
385config VIDEO_SAA7127
386 tristate "Philips SAA7127/9 digital video encoders"
387 depends on VIDEO_DEV && I2C && EXPERIMENTAL
388 ---help---
389 Support for the Philips SAA7127/9 digital video encoders.
390
391 To compile this driver as a module, choose M here: the
392 module will be called saa7127
355 393
356endmenu 394endmenu
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index faf728366c4e..f2bd4c0c4f10 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -15,7 +15,7 @@ msp3400-objs := msp3400-driver.o msp3400-kthreads.o
15 15
16obj-$(CONFIG_VIDEO_DEV) += videodev.o v4l2-common.o v4l1-compat.o compat_ioctl32.o 16obj-$(CONFIG_VIDEO_DEV) += videodev.o v4l2-common.o v4l1-compat.o compat_ioctl32.o
17 17
18obj-$(CONFIG_VIDEO_BT848) += bttv.o msp3400.o tvaudio.o \ 18obj-$(CONFIG_VIDEO_BT848) += bttv.o tvaudio.o \
19 tda7432.o tda9875.o ir-kbd-i2c.o 19 tda7432.o tda9875.o ir-kbd-i2c.o
20obj-$(CONFIG_SOUND_TVMIXER) += tvmixer.o 20obj-$(CONFIG_SOUND_TVMIXER) += tvmixer.o
21 21
@@ -44,10 +44,13 @@ obj-$(CONFIG_VIDEO_MEYE) += meye.o
44obj-$(CONFIG_VIDEO_SAA7134) += ir-kbd-i2c.o saa7134/ 44obj-$(CONFIG_VIDEO_SAA7134) += ir-kbd-i2c.o saa7134/
45obj-$(CONFIG_VIDEO_CX88) += cx88/ 45obj-$(CONFIG_VIDEO_CX88) += cx88/
46obj-$(CONFIG_VIDEO_EM28XX) += em28xx/ 46obj-$(CONFIG_VIDEO_EM28XX) += em28xx/
47obj-$(CONFIG_VIDEO_EM28XX) += saa711x.o tvp5150.o 47obj-$(CONFIG_VIDEO_EM28XX) += tvp5150.o
48obj-$(CONFIG_VIDEO_AUDIO_DECODER) += wm8775.o cs53l32a.o 48obj-$(CONFIG_VIDEO_MSP3400) += msp3400.o
49obj-$(CONFIG_VIDEO_CS53L32A) += cs53l32a.o
50obj-$(CONFIG_VIDEO_WM8775) += wm8775.o
49obj-$(CONFIG_VIDEO_OVCAMCHIP) += ovcamchip/ 51obj-$(CONFIG_VIDEO_OVCAMCHIP) += ovcamchip/
50obj-$(CONFIG_VIDEO_MXB) += saa7111.o tuner.o tda9840.o tea6415c.o tea6420.o mxb.o 52obj-$(CONFIG_VIDEO_CPIA2) += cpia2/
53obj-$(CONFIG_VIDEO_MXB) += saa7111.o tda9840.o tea6415c.o tea6420.o mxb.o
51obj-$(CONFIG_VIDEO_HEXIUM_ORION) += hexium_orion.o 54obj-$(CONFIG_VIDEO_HEXIUM_ORION) += hexium_orion.o
52obj-$(CONFIG_VIDEO_HEXIUM_GEMINI) += hexium_gemini.o 55obj-$(CONFIG_VIDEO_HEXIUM_GEMINI) += hexium_gemini.o
53obj-$(CONFIG_VIDEO_DPC) += saa7111.o dpc7146.o 56obj-$(CONFIG_VIDEO_DPC) += saa7111.o dpc7146.o
@@ -61,6 +64,8 @@ obj-$(CONFIG_VIDEO_TVEEPROM) += tveeprom.o
61 64
62obj-$(CONFIG_VIDEO_M32R_AR_M64278) += arv.o 65obj-$(CONFIG_VIDEO_M32R_AR_M64278) += arv.o
63 66
64obj-$(CONFIG_VIDEO_DECODER) += saa7115.o cx25840/ saa7127.o 67obj-$(CONFIG_VIDEO_CX25840) += cx25840/
68obj-$(CONFIG_VIDEO_SAA711X) += saa7115.o
69obj-$(CONFIG_VIDEO_SAA7127) += saa7127.o
65 70
66EXTRA_CFLAGS += -I$(srctree)/drivers/media/dvb/dvb-core 71EXTRA_CFLAGS += -I$(srctree)/drivers/media/dvb/dvb-core
diff --git a/drivers/media/video/arv.c b/drivers/media/video/arv.c
index 994b75fe165a..c586f64b6b7f 100644
--- a/drivers/media/video/arv.c
+++ b/drivers/media/video/arv.c
@@ -31,8 +31,8 @@
31#include <linux/mm.h> 31#include <linux/mm.h>
32#include <linux/sched.h> 32#include <linux/sched.h>
33#include <linux/videodev.h> 33#include <linux/videodev.h>
34#include <linux/mutex.h>
34 35
35#include <asm/semaphore.h>
36#include <asm/uaccess.h> 36#include <asm/uaccess.h>
37#include <asm/m32r.h> 37#include <asm/m32r.h>
38#include <asm/io.h> 38#include <asm/io.h>
@@ -117,7 +117,7 @@ struct ar_device {
117 int width, height; 117 int width, height;
118 int frame_bytes, line_bytes; 118 int frame_bytes, line_bytes;
119 wait_queue_head_t wait; 119 wait_queue_head_t wait;
120 struct semaphore lock; 120 struct mutex lock;
121}; 121};
122 122
123static int video_nr = -1; /* video device number (first free) */ 123static int video_nr = -1; /* video device number (first free) */
@@ -288,7 +288,7 @@ static ssize_t ar_read(struct file *file, char *buf, size_t count, loff_t *ppos)
288 if (ar->mode == AR_MODE_NORMAL) 288 if (ar->mode == AR_MODE_NORMAL)
289 arvcr1 |= ARVCR1_NORMAL; 289 arvcr1 |= ARVCR1_NORMAL;
290 290
291 down(&ar->lock); 291 mutex_lock(&ar->lock);
292 292
293#if USE_INT 293#if USE_INT
294 local_irq_save(flags); 294 local_irq_save(flags);
@@ -392,7 +392,7 @@ static ssize_t ar_read(struct file *file, char *buf, size_t count, loff_t *ppos)
392 } 392 }
393 DEBUG(1, "ret = %d\n", ret); 393 DEBUG(1, "ret = %d\n", ret);
394out_up: 394out_up:
395 up(&ar->lock); 395 mutex_unlock(&ar->lock);
396 return ret; 396 return ret;
397} 397}
398 398
@@ -456,7 +456,7 @@ static int ar_do_ioctl(struct inode *inode, struct file *file,
456 (w->width != AR_WIDTH_QVGA || w->height != AR_HEIGHT_QVGA)) 456 (w->width != AR_WIDTH_QVGA || w->height != AR_HEIGHT_QVGA))
457 return -EINVAL; 457 return -EINVAL;
458 458
459 down(&ar->lock); 459 mutex_lock(&ar->lock);
460 ar->width = w->width; 460 ar->width = w->width;
461 ar->height = w->height; 461 ar->height = w->height;
462 if (ar->width == AR_WIDTH_VGA) { 462 if (ar->width == AR_WIDTH_VGA) {
@@ -473,7 +473,7 @@ static int ar_do_ioctl(struct inode *inode, struct file *file,
473 ar->line_bytes = AR_LINE_BYTES_QVGA; 473 ar->line_bytes = AR_LINE_BYTES_QVGA;
474 ar->mode = AR_MODE_INTERLACE; 474 ar->mode = AR_MODE_INTERLACE;
475 } 475 }
476 up(&ar->lock); 476 mutex_unlock(&ar->lock);
477 return 0; 477 return 0;
478 } 478 }
479 case VIDIOCGFBUF: 479 case VIDIOCGFBUF:
@@ -734,7 +734,7 @@ static int ar_initialize(struct video_device *dev)
734void ar_release(struct video_device *vfd) 734void ar_release(struct video_device *vfd)
735{ 735{
736 struct ar_device *ar = vfd->priv; 736 struct ar_device *ar = vfd->priv;
737 down(&ar->lock); 737 mutex_lock(&ar->lock);
738 video_device_release(vfd); 738 video_device_release(vfd);
739} 739}
740 740
@@ -824,7 +824,7 @@ static int __init ar_init(void)
824 ar->line_bytes = AR_LINE_BYTES_QVGA; 824 ar->line_bytes = AR_LINE_BYTES_QVGA;
825 ar->mode = AR_MODE_INTERLACE; 825 ar->mode = AR_MODE_INTERLACE;
826 } 826 }
827 init_MUTEX(&ar->lock); 827 mutex_init(&ar->lock);
828 init_waitqueue_head(&ar->wait); 828 init_waitqueue_head(&ar->wait);
829 829
830#if USE_INT 830#if USE_INT
diff --git a/drivers/media/video/bttv-cards.c b/drivers/media/video/bttv-cards.c
index 9749d6ed6231..abfa6ad857a0 100644
--- a/drivers/media/video/bttv-cards.c
+++ b/drivers/media/video/bttv-cards.c
@@ -137,6 +137,8 @@ MODULE_PARM_DESC(card,"specify TV/grabber card model, see CARDLIST file for a li
137MODULE_PARM_DESC(pll,"specify installed crystal (0=none, 28=28 MHz, 35=35 MHz)"); 137MODULE_PARM_DESC(pll,"specify installed crystal (0=none, 28=28 MHz, 35=35 MHz)");
138MODULE_PARM_DESC(tuner,"specify installed tuner type"); 138MODULE_PARM_DESC(tuner,"specify installed tuner type");
139MODULE_PARM_DESC(autoload,"automatically load i2c modules like tuner.o, default is 1 (yes)"); 139MODULE_PARM_DESC(autoload,"automatically load i2c modules like tuner.o, default is 1 (yes)");
140MODULE_PARM_DESC(no_overlay,"allow override overlay default (0 disables, 1 enables)"
141 " [some VIA/SIS chipsets are known to have problem with overlay]");
140 142
141/* ----------------------------------------------------------------------- */ 143/* ----------------------------------------------------------------------- */
142/* list of card IDs for bt878+ cards */ 144/* list of card IDs for bt878+ cards */
@@ -275,7 +277,6 @@ static struct CARD {
275 { 0x03116000, BTTV_BOARD_SENSORAY311, "Sensoray 311" }, 277 { 0x03116000, BTTV_BOARD_SENSORAY311, "Sensoray 311" },
276 { 0x00790e11, BTTV_BOARD_WINDVR, "Canopus WinDVR PCI" }, 278 { 0x00790e11, BTTV_BOARD_WINDVR, "Canopus WinDVR PCI" },
277 { 0xa0fca1a0, BTTV_BOARD_ZOLTRIX, "Face to Face Tvmax" }, 279 { 0xa0fca1a0, BTTV_BOARD_ZOLTRIX, "Face to Face Tvmax" },
278 { 0x20007063, BTTV_BOARD_PC_HDTV, "pcHDTV HD-2000 TV"},
279 { 0x82b2aa6a, BTTV_BOARD_SIMUS_GVC1100, "SIMUS GVC1100" }, 280 { 0x82b2aa6a, BTTV_BOARD_SIMUS_GVC1100, "SIMUS GVC1100" },
280 { 0x146caa0c, BTTV_BOARD_PV951, "ituner spectra8" }, 281 { 0x146caa0c, BTTV_BOARD_PV951, "ituner spectra8" },
281 { 0x200a1295, BTTV_BOARD_PXC200, "ImageNation PXC200A" }, 282 { 0x200a1295, BTTV_BOARD_PXC200, "ImageNation PXC200A" },
@@ -297,13 +298,14 @@ static struct CARD {
297 * { 0x13eb0070, BTTV_BOARD_HAUPPAUGE_IMPACTVCB, "Hauppauge ImpactVCB" }, */ 298 * { 0x13eb0070, BTTV_BOARD_HAUPPAUGE_IMPACTVCB, "Hauppauge ImpactVCB" }, */
298 299
299 /* DVB cards (using pci function .1 for mpeg data xfer) */ 300 /* DVB cards (using pci function .1 for mpeg data xfer) */
300 { 0x01010071, BTTV_BOARD_NEBULA_DIGITV, "Nebula Electronics DigiTV" },
301 { 0x07611461, BTTV_BOARD_AVDVBT_761, "AverMedia AverTV DVB-T 761" },
302 { 0x001c11bd, BTTV_BOARD_PINNACLESAT, "Pinnacle PCTV Sat" }, 301 { 0x001c11bd, BTTV_BOARD_PINNACLESAT, "Pinnacle PCTV Sat" },
302 { 0x01010071, BTTV_BOARD_NEBULA_DIGITV, "Nebula Electronics DigiTV" },
303 { 0x20007063, BTTV_BOARD_PC_HDTV, "pcHDTV HD-2000 TV"},
303 { 0x002611bd, BTTV_BOARD_TWINHAN_DST, "Pinnacle PCTV SAT CI" }, 304 { 0x002611bd, BTTV_BOARD_TWINHAN_DST, "Pinnacle PCTV SAT CI" },
304 { 0x00011822, BTTV_BOARD_TWINHAN_DST, "Twinhan VisionPlus DVB" }, 305 { 0x00011822, BTTV_BOARD_TWINHAN_DST, "Twinhan VisionPlus DVB" },
305 { 0xfc00270f, BTTV_BOARD_TWINHAN_DST, "ChainTech digitop DST-1000 DVB-S" }, 306 { 0xfc00270f, BTTV_BOARD_TWINHAN_DST, "ChainTech digitop DST-1000 DVB-S" },
306 { 0x07711461, BTTV_BOARD_AVDVBT_771, "AVermedia AverTV DVB-T 771" }, 307 { 0x07711461, BTTV_BOARD_AVDVBT_771, "AVermedia AverTV DVB-T 771" },
308 { 0x07611461, BTTV_BOARD_AVDVBT_761, "AverMedia AverTV DVB-T 761" },
307 { 0xdb1018ac, BTTV_BOARD_DVICO_DVBT_LITE, "DViCO FusionHDTV DVB-T Lite" }, 309 { 0xdb1018ac, BTTV_BOARD_DVICO_DVBT_LITE, "DViCO FusionHDTV DVB-T Lite" },
308 { 0xd50018ac, BTTV_BOARD_DVICO_FUSIONHDTV_5_LITE, "DViCO FusionHDTV 5 Lite" }, 310 { 0xd50018ac, BTTV_BOARD_DVICO_FUSIONHDTV_5_LITE, "DViCO FusionHDTV 5 Lite" },
309 311
@@ -4944,12 +4946,14 @@ void __devinit bttv_check_chipset(void)
4944 if (vsfx) 4946 if (vsfx)
4945 printk(KERN_INFO "bttv: Host bridge needs VSFX enabled.\n"); 4947 printk(KERN_INFO "bttv: Host bridge needs VSFX enabled.\n");
4946 if (pcipci_fail) { 4948 if (pcipci_fail) {
4947 printk(KERN_WARNING "bttv: BT848 and your chipset may not work together.\n"); 4949 printk(KERN_INFO "bttv: bttv and your chipset may not work "
4950 "together.\n");
4948 if (!no_overlay) { 4951 if (!no_overlay) {
4949 printk(KERN_WARNING "bttv: overlay will be disabled.\n"); 4952 printk(KERN_INFO "bttv: overlay will be disabled.\n");
4950 no_overlay = 1; 4953 no_overlay = 1;
4951 } else { 4954 } else {
4952 printk(KERN_WARNING "bttv: overlay forced. Use this option at your own risk.\n"); 4955 printk(KERN_INFO "bttv: overlay forced. Use this "
4956 "option at your own risk.\n");
4953 } 4957 }
4954 } 4958 }
4955 if (UNSET != latency) 4959 if (UNSET != latency)
diff --git a/drivers/media/video/bttv-driver.c b/drivers/media/video/bttv-driver.c
index 578b20085082..c0415d6e7fee 100644
--- a/drivers/media/video/bttv-driver.c
+++ b/drivers/media/video/bttv-driver.c
@@ -1965,7 +1965,7 @@ static int setup_window(struct bttv_fh *fh, struct bttv *btv,
1965 BUG(); 1965 BUG();
1966 } 1966 }
1967 1967
1968 down(&fh->cap.lock); 1968 mutex_lock(&fh->cap.lock);
1969 kfree(fh->ov.clips); 1969 kfree(fh->ov.clips);
1970 fh->ov.clips = clips; 1970 fh->ov.clips = clips;
1971 fh->ov.nclips = n; 1971 fh->ov.nclips = n;
@@ -1986,7 +1986,7 @@ static int setup_window(struct bttv_fh *fh, struct bttv *btv,
1986 bttv_overlay_risc(btv, &fh->ov, fh->ovfmt, new); 1986 bttv_overlay_risc(btv, &fh->ov, fh->ovfmt, new);
1987 retval = bttv_switch_overlay(btv,fh,new); 1987 retval = bttv_switch_overlay(btv,fh,new);
1988 } 1988 }
1989 up(&fh->cap.lock); 1989 mutex_unlock(&fh->cap.lock);
1990 return retval; 1990 return retval;
1991} 1991}
1992 1992
@@ -2166,7 +2166,7 @@ static int bttv_s_fmt(struct bttv_fh *fh, struct bttv *btv,
2166 fmt = format_by_fourcc(f->fmt.pix.pixelformat); 2166 fmt = format_by_fourcc(f->fmt.pix.pixelformat);
2167 2167
2168 /* update our state informations */ 2168 /* update our state informations */
2169 down(&fh->cap.lock); 2169 mutex_lock(&fh->cap.lock);
2170 fh->fmt = fmt; 2170 fh->fmt = fmt;
2171 fh->cap.field = f->fmt.pix.field; 2171 fh->cap.field = f->fmt.pix.field;
2172 fh->cap.last = V4L2_FIELD_NONE; 2172 fh->cap.last = V4L2_FIELD_NONE;
@@ -2175,7 +2175,7 @@ static int bttv_s_fmt(struct bttv_fh *fh, struct bttv *btv,
2175 btv->init.fmt = fmt; 2175 btv->init.fmt = fmt;
2176 btv->init.width = f->fmt.pix.width; 2176 btv->init.width = f->fmt.pix.width;
2177 btv->init.height = f->fmt.pix.height; 2177 btv->init.height = f->fmt.pix.height;
2178 up(&fh->cap.lock); 2178 mutex_unlock(&fh->cap.lock);
2179 2179
2180 return 0; 2180 return 0;
2181 } 2181 }
@@ -2282,7 +2282,7 @@ static int bttv_do_ioctl(struct inode *inode, struct file *file,
2282 fmt = format_by_palette(pic->palette); 2282 fmt = format_by_palette(pic->palette);
2283 if (NULL == fmt) 2283 if (NULL == fmt)
2284 return -EINVAL; 2284 return -EINVAL;
2285 down(&fh->cap.lock); 2285 mutex_lock(&fh->cap.lock);
2286 if (fmt->depth != pic->depth) { 2286 if (fmt->depth != pic->depth) {
2287 retval = -EINVAL; 2287 retval = -EINVAL;
2288 goto fh_unlock_and_return; 2288 goto fh_unlock_and_return;
@@ -2313,7 +2313,7 @@ static int bttv_do_ioctl(struct inode *inode, struct file *file,
2313 bt848_contrast(btv,pic->contrast); 2313 bt848_contrast(btv,pic->contrast);
2314 bt848_hue(btv,pic->hue); 2314 bt848_hue(btv,pic->hue);
2315 bt848_sat(btv,pic->colour); 2315 bt848_sat(btv,pic->colour);
2316 up(&fh->cap.lock); 2316 mutex_unlock(&fh->cap.lock);
2317 return 0; 2317 return 0;
2318 } 2318 }
2319 2319
@@ -2379,7 +2379,7 @@ static int bttv_do_ioctl(struct inode *inode, struct file *file,
2379 return -EPERM; 2379 return -EPERM;
2380 end = (unsigned long)fbuf->base + 2380 end = (unsigned long)fbuf->base +
2381 fbuf->height * fbuf->bytesperline; 2381 fbuf->height * fbuf->bytesperline;
2382 down(&fh->cap.lock); 2382 mutex_lock(&fh->cap.lock);
2383 retval = -EINVAL; 2383 retval = -EINVAL;
2384 2384
2385 switch (fbuf->depth) { 2385 switch (fbuf->depth) {
@@ -2417,7 +2417,7 @@ static int bttv_do_ioctl(struct inode *inode, struct file *file,
2417 btv->fbuf.fmt.bytesperline = fbuf->bytesperline; 2417 btv->fbuf.fmt.bytesperline = fbuf->bytesperline;
2418 else 2418 else
2419 btv->fbuf.fmt.bytesperline = btv->fbuf.fmt.width*fbuf->depth/8; 2419 btv->fbuf.fmt.bytesperline = btv->fbuf.fmt.width*fbuf->depth/8;
2420 up(&fh->cap.lock); 2420 mutex_unlock(&fh->cap.lock);
2421 return 0; 2421 return 0;
2422 } 2422 }
2423 2423
@@ -2440,7 +2440,7 @@ static int bttv_do_ioctl(struct inode *inode, struct file *file,
2440 if (!check_alloc_btres(btv,fh,RESOURCE_OVERLAY)) 2440 if (!check_alloc_btres(btv,fh,RESOURCE_OVERLAY))
2441 return -EBUSY; 2441 return -EBUSY;
2442 2442
2443 down(&fh->cap.lock); 2443 mutex_lock(&fh->cap.lock);
2444 if (*on) { 2444 if (*on) {
2445 fh->ov.tvnorm = btv->tvnorm; 2445 fh->ov.tvnorm = btv->tvnorm;
2446 new = videobuf_alloc(sizeof(*new)); 2446 new = videobuf_alloc(sizeof(*new));
@@ -2451,7 +2451,7 @@ static int bttv_do_ioctl(struct inode *inode, struct file *file,
2451 2451
2452 /* switch over */ 2452 /* switch over */
2453 retval = bttv_switch_overlay(btv,fh,new); 2453 retval = bttv_switch_overlay(btv,fh,new);
2454 up(&fh->cap.lock); 2454 mutex_unlock(&fh->cap.lock);
2455 return retval; 2455 return retval;
2456 } 2456 }
2457 2457
@@ -2460,7 +2460,7 @@ static int bttv_do_ioctl(struct inode *inode, struct file *file,
2460 struct video_mbuf *mbuf = arg; 2460 struct video_mbuf *mbuf = arg;
2461 unsigned int i; 2461 unsigned int i;
2462 2462
2463 down(&fh->cap.lock); 2463 mutex_lock(&fh->cap.lock);
2464 retval = videobuf_mmap_setup(&fh->cap,gbuffers,gbufsize, 2464 retval = videobuf_mmap_setup(&fh->cap,gbuffers,gbufsize,
2465 V4L2_MEMORY_MMAP); 2465 V4L2_MEMORY_MMAP);
2466 if (retval < 0) 2466 if (retval < 0)
@@ -2470,7 +2470,7 @@ static int bttv_do_ioctl(struct inode *inode, struct file *file,
2470 mbuf->size = gbuffers * gbufsize; 2470 mbuf->size = gbuffers * gbufsize;
2471 for (i = 0; i < gbuffers; i++) 2471 for (i = 0; i < gbuffers; i++)
2472 mbuf->offsets[i] = i * gbufsize; 2472 mbuf->offsets[i] = i * gbufsize;
2473 up(&fh->cap.lock); 2473 mutex_unlock(&fh->cap.lock);
2474 return 0; 2474 return 0;
2475 } 2475 }
2476 case VIDIOCMCAPTURE: 2476 case VIDIOCMCAPTURE:
@@ -2482,7 +2482,7 @@ static int bttv_do_ioctl(struct inode *inode, struct file *file,
2482 if (vm->frame >= VIDEO_MAX_FRAME) 2482 if (vm->frame >= VIDEO_MAX_FRAME)
2483 return -EINVAL; 2483 return -EINVAL;
2484 2484
2485 down(&fh->cap.lock); 2485 mutex_lock(&fh->cap.lock);
2486 retval = -EINVAL; 2486 retval = -EINVAL;
2487 buf = (struct bttv_buffer *)fh->cap.bufs[vm->frame]; 2487 buf = (struct bttv_buffer *)fh->cap.bufs[vm->frame];
2488 if (NULL == buf) 2488 if (NULL == buf)
@@ -2504,7 +2504,7 @@ static int bttv_do_ioctl(struct inode *inode, struct file *file,
2504 spin_lock_irqsave(&btv->s_lock,flags); 2504 spin_lock_irqsave(&btv->s_lock,flags);
2505 buffer_queue(&fh->cap,&buf->vb); 2505 buffer_queue(&fh->cap,&buf->vb);
2506 spin_unlock_irqrestore(&btv->s_lock,flags); 2506 spin_unlock_irqrestore(&btv->s_lock,flags);
2507 up(&fh->cap.lock); 2507 mutex_unlock(&fh->cap.lock);
2508 return 0; 2508 return 0;
2509 } 2509 }
2510 case VIDIOCSYNC: 2510 case VIDIOCSYNC:
@@ -2515,7 +2515,7 @@ static int bttv_do_ioctl(struct inode *inode, struct file *file,
2515 if (*frame >= VIDEO_MAX_FRAME) 2515 if (*frame >= VIDEO_MAX_FRAME)
2516 return -EINVAL; 2516 return -EINVAL;
2517 2517
2518 down(&fh->cap.lock); 2518 mutex_lock(&fh->cap.lock);
2519 retval = -EINVAL; 2519 retval = -EINVAL;
2520 buf = (struct bttv_buffer *)fh->cap.bufs[*frame]; 2520 buf = (struct bttv_buffer *)fh->cap.bufs[*frame];
2521 if (NULL == buf) 2521 if (NULL == buf)
@@ -2535,7 +2535,7 @@ static int bttv_do_ioctl(struct inode *inode, struct file *file,
2535 retval = -EINVAL; 2535 retval = -EINVAL;
2536 break; 2536 break;
2537 } 2537 }
2538 up(&fh->cap.lock); 2538 mutex_unlock(&fh->cap.lock);
2539 return retval; 2539 return retval;
2540 } 2540 }
2541 2541
@@ -2719,7 +2719,7 @@ static int bttv_do_ioctl(struct inode *inode, struct file *file,
2719 if (0 == (fmt->flags & FORMAT_FLAGS_PACKED)) 2719 if (0 == (fmt->flags & FORMAT_FLAGS_PACKED))
2720 return -EINVAL; 2720 return -EINVAL;
2721 2721
2722 down(&fh->cap.lock); 2722 mutex_lock(&fh->cap.lock);
2723 retval = -EINVAL; 2723 retval = -EINVAL;
2724 if (fb->flags & V4L2_FBUF_FLAG_OVERLAY) { 2724 if (fb->flags & V4L2_FBUF_FLAG_OVERLAY) {
2725 if (fb->fmt.width > bttv_tvnorms[btv->tvnorm].swidth) 2725 if (fb->fmt.width > bttv_tvnorms[btv->tvnorm].swidth)
@@ -2759,7 +2759,7 @@ static int bttv_do_ioctl(struct inode *inode, struct file *file,
2759 retval = bttv_switch_overlay(btv,fh,new); 2759 retval = bttv_switch_overlay(btv,fh,new);
2760 } 2760 }
2761 } 2761 }
2762 up(&fh->cap.lock); 2762 mutex_unlock(&fh->cap.lock);
2763 return retval; 2763 return retval;
2764 } 2764 }
2765 2765
@@ -2890,7 +2890,7 @@ static int bttv_do_ioctl(struct inode *inode, struct file *file,
2890 return 0; 2890 return 0;
2891 2891
2892 fh_unlock_and_return: 2892 fh_unlock_and_return:
2893 up(&fh->cap.lock); 2893 mutex_unlock(&fh->cap.lock);
2894 return retval; 2894 return retval;
2895} 2895}
2896 2896
@@ -2957,16 +2957,16 @@ static unsigned int bttv_poll(struct file *file, poll_table *wait)
2957 buf = list_entry(fh->cap.stream.next,struct bttv_buffer,vb.stream); 2957 buf = list_entry(fh->cap.stream.next,struct bttv_buffer,vb.stream);
2958 } else { 2958 } else {
2959 /* read() capture */ 2959 /* read() capture */
2960 down(&fh->cap.lock); 2960 mutex_lock(&fh->cap.lock);
2961 if (NULL == fh->cap.read_buf) { 2961 if (NULL == fh->cap.read_buf) {
2962 /* need to capture a new frame */ 2962 /* need to capture a new frame */
2963 if (locked_btres(fh->btv,RESOURCE_VIDEO)) { 2963 if (locked_btres(fh->btv,RESOURCE_VIDEO)) {
2964 up(&fh->cap.lock); 2964 mutex_unlock(&fh->cap.lock);
2965 return POLLERR; 2965 return POLLERR;
2966 } 2966 }
2967 fh->cap.read_buf = videobuf_alloc(fh->cap.msize); 2967 fh->cap.read_buf = videobuf_alloc(fh->cap.msize);
2968 if (NULL == fh->cap.read_buf) { 2968 if (NULL == fh->cap.read_buf) {
2969 up(&fh->cap.lock); 2969 mutex_unlock(&fh->cap.lock);
2970 return POLLERR; 2970 return POLLERR;
2971 } 2971 }
2972 fh->cap.read_buf->memory = V4L2_MEMORY_USERPTR; 2972 fh->cap.read_buf->memory = V4L2_MEMORY_USERPTR;
@@ -2974,13 +2974,13 @@ static unsigned int bttv_poll(struct file *file, poll_table *wait)
2974 if (0 != fh->cap.ops->buf_prepare(&fh->cap,fh->cap.read_buf,field)) { 2974 if (0 != fh->cap.ops->buf_prepare(&fh->cap,fh->cap.read_buf,field)) {
2975 kfree (fh->cap.read_buf); 2975 kfree (fh->cap.read_buf);
2976 fh->cap.read_buf = NULL; 2976 fh->cap.read_buf = NULL;
2977 up(&fh->cap.lock); 2977 mutex_unlock(&fh->cap.lock);
2978 return POLLERR; 2978 return POLLERR;
2979 } 2979 }
2980 fh->cap.ops->buf_queue(&fh->cap,fh->cap.read_buf); 2980 fh->cap.ops->buf_queue(&fh->cap,fh->cap.read_buf);
2981 fh->cap.read_off = 0; 2981 fh->cap.read_off = 0;
2982 } 2982 }
2983 up(&fh->cap.lock); 2983 mutex_unlock(&fh->cap.lock);
2984 buf = (struct bttv_buffer*)fh->cap.read_buf; 2984 buf = (struct bttv_buffer*)fh->cap.read_buf;
2985 } 2985 }
2986 2986
diff --git a/drivers/media/video/bttv-input.c b/drivers/media/video/bttv-input.c
index 221b36e7f392..69efa0e5174d 100644
--- a/drivers/media/video/bttv-input.c
+++ b/drivers/media/video/bttv-input.c
@@ -28,251 +28,6 @@
28#include "bttv.h" 28#include "bttv.h"
29#include "bttvp.h" 29#include "bttvp.h"
30 30
31/* ---------------------------------------------------------------------- */
32
33static IR_KEYTAB_TYPE ir_codes_avermedia[IR_KEYTAB_SIZE] = {
34 [ 34 ] = KEY_KP0,
35 [ 40 ] = KEY_KP1,
36 [ 24 ] = KEY_KP2,
37 [ 56 ] = KEY_KP3,
38 [ 36 ] = KEY_KP4,
39 [ 20 ] = KEY_KP5,
40 [ 52 ] = KEY_KP6,
41 [ 44 ] = KEY_KP7,
42 [ 28 ] = KEY_KP8,
43 [ 60 ] = KEY_KP9,
44
45 [ 48 ] = KEY_EJECTCD, // Unmarked on my controller
46 [ 0 ] = KEY_POWER,
47 [ 18 ] = BTN_LEFT, // DISPLAY/L
48 [ 50 ] = BTN_RIGHT, // LOOP/R
49 [ 10 ] = KEY_MUTE,
50 [ 38 ] = KEY_RECORD,
51 [ 22 ] = KEY_PAUSE,
52 [ 54 ] = KEY_STOP,
53 [ 30 ] = KEY_VOLUMEDOWN,
54 [ 62 ] = KEY_VOLUMEUP,
55
56 [ 32 ] = KEY_TUNER, // TV/FM
57 [ 16 ] = KEY_CD,
58 [ 8 ] = KEY_VIDEO,
59 [ 4 ] = KEY_AUDIO,
60 [ 12 ] = KEY_ZOOM, // full screen
61 [ 2 ] = KEY_INFO, // preview
62 [ 42 ] = KEY_SEARCH, // autoscan
63 [ 26 ] = KEY_STOP, // freeze
64 [ 58 ] = KEY_RECORD, // capture
65 [ 6 ] = KEY_PLAY, // unmarked
66 [ 46 ] = KEY_RED, // unmarked
67 [ 14 ] = KEY_GREEN, // unmarked
68
69 [ 33 ] = KEY_YELLOW, // unmarked
70 [ 17 ] = KEY_CHANNELDOWN,
71 [ 49 ] = KEY_CHANNELUP,
72 [ 1 ] = KEY_BLUE, // unmarked
73};
74
75/* Matt Jesson <dvb@jesson.eclipse.co.uk */
76static IR_KEYTAB_TYPE ir_codes_avermedia_dvbt[IR_KEYTAB_SIZE] = {
77 [ 0x28 ] = KEY_KP0, //'0' / 'enter'
78 [ 0x22 ] = KEY_KP1, //'1'
79 [ 0x12 ] = KEY_KP2, //'2' / 'up arrow'
80 [ 0x32 ] = KEY_KP3, //'3'
81 [ 0x24 ] = KEY_KP4, //'4' / 'left arrow'
82 [ 0x14 ] = KEY_KP5, //'5'
83 [ 0x34 ] = KEY_KP6, //'6' / 'right arrow'
84 [ 0x26 ] = KEY_KP7, //'7'
85 [ 0x16 ] = KEY_KP8, //'8' / 'down arrow'
86 [ 0x36 ] = KEY_KP9, //'9'
87
88 [ 0x20 ] = KEY_LIST, // 'source'
89 [ 0x10 ] = KEY_TEXT, // 'teletext'
90 [ 0x00 ] = KEY_POWER, // 'power'
91 [ 0x04 ] = KEY_AUDIO, // 'audio'
92 [ 0x06 ] = KEY_ZOOM, // 'full screen'
93 [ 0x18 ] = KEY_VIDEO, // 'display'
94 [ 0x38 ] = KEY_SEARCH, // 'loop'
95 [ 0x08 ] = KEY_INFO, // 'preview'
96 [ 0x2a ] = KEY_REWIND, // 'backward <<'
97 [ 0x1a ] = KEY_FASTFORWARD, // 'forward >>'
98 [ 0x3a ] = KEY_RECORD, // 'capture'
99 [ 0x0a ] = KEY_MUTE, // 'mute'
100 [ 0x2c ] = KEY_RECORD, // 'record'
101 [ 0x1c ] = KEY_PAUSE, // 'pause'
102 [ 0x3c ] = KEY_STOP, // 'stop'
103 [ 0x0c ] = KEY_PLAY, // 'play'
104 [ 0x2e ] = KEY_RED, // 'red'
105 [ 0x01 ] = KEY_BLUE, // 'blue' / 'cancel'
106 [ 0x0e ] = KEY_YELLOW, // 'yellow' / 'ok'
107 [ 0x21 ] = KEY_GREEN, // 'green'
108 [ 0x11 ] = KEY_CHANNELDOWN, // 'channel -'
109 [ 0x31 ] = KEY_CHANNELUP, // 'channel +'
110 [ 0x1e ] = KEY_VOLUMEDOWN, // 'volume -'
111 [ 0x3e ] = KEY_VOLUMEUP, // 'volume +'
112};
113
114/* Attila Kondoros <attila.kondoros@chello.hu> */
115static IR_KEYTAB_TYPE ir_codes_apac_viewcomp[IR_KEYTAB_SIZE] = {
116
117 [ 1 ] = KEY_KP1,
118 [ 2 ] = KEY_KP2,
119 [ 3 ] = KEY_KP3,
120 [ 4 ] = KEY_KP4,
121 [ 5 ] = KEY_KP5,
122 [ 6 ] = KEY_KP6,
123 [ 7 ] = KEY_KP7,
124 [ 8 ] = KEY_KP8,
125 [ 9 ] = KEY_KP9,
126 [ 0 ] = KEY_KP0,
127 [ 23 ] = KEY_LAST, // +100
128 [ 10 ] = KEY_LIST, // recall
129
130
131 [ 28 ] = KEY_TUNER, // TV/FM
132 [ 21 ] = KEY_SEARCH, // scan
133 [ 18 ] = KEY_POWER, // power
134 [ 31 ] = KEY_VOLUMEDOWN, // vol up
135 [ 27 ] = KEY_VOLUMEUP, // vol down
136 [ 30 ] = KEY_CHANNELDOWN, // chn up
137 [ 26 ] = KEY_CHANNELUP, // chn down
138
139 [ 17 ] = KEY_VIDEO, // video
140 [ 15 ] = KEY_ZOOM, // full screen
141 [ 19 ] = KEY_MUTE, // mute/unmute
142 [ 16 ] = KEY_TEXT, // min
143
144 [ 13 ] = KEY_STOP, // freeze
145 [ 14 ] = KEY_RECORD, // record
146 [ 29 ] = KEY_PLAYPAUSE, // stop
147 [ 25 ] = KEY_PLAY, // play
148
149 [ 22 ] = KEY_GOTO, // osd
150 [ 20 ] = KEY_REFRESH, // default
151 [ 12 ] = KEY_KPPLUS, // fine tune >>>>
152 [ 24 ] = KEY_KPMINUS // fine tune <<<<
153};
154
155/* ---------------------------------------------------------------------- */
156
157static IR_KEYTAB_TYPE ir_codes_conceptronic[IR_KEYTAB_SIZE] = {
158
159 [ 30 ] = KEY_POWER, // power
160 [ 7 ] = KEY_MEDIA, // source
161 [ 28 ] = KEY_SEARCH, // scan
162
163/* FIXME: duplicate keycodes?
164 *
165 * These four keys seem to share the same GPIO as CH+, CH-, <<< and >>>
166 * The GPIO values are
167 * 6397fb for both "Scan <" and "CH -",
168 * 639ffb for "Scan >" and "CH+",
169 * 6384fb for "Tune <" and "<<<",
170 * 638cfb for "Tune >" and ">>>", regardless of the mask.
171 *
172 * [ 23 ] = KEY_BACK, // fm scan <<
173 * [ 31 ] = KEY_FORWARD, // fm scan >>
174 *
175 * [ 4 ] = KEY_LEFT, // fm tuning <
176 * [ 12 ] = KEY_RIGHT, // fm tuning >
177 *
178 * For now, these four keys are disabled. Pressing them will generate
179 * the CH+/CH-/<<</>>> events
180 */
181
182 [ 3 ] = KEY_TUNER, // TV/FM
183
184 [ 0 ] = KEY_RECORD,
185 [ 8 ] = KEY_STOP,
186 [ 17 ] = KEY_PLAY,
187
188 [ 26 ] = KEY_PLAYPAUSE, // freeze
189 [ 25 ] = KEY_ZOOM, // zoom
190 [ 15 ] = KEY_TEXT, // min
191
192 [ 1 ] = KEY_KP1,
193 [ 11 ] = KEY_KP2,
194 [ 27 ] = KEY_KP3,
195 [ 5 ] = KEY_KP4,
196 [ 9 ] = KEY_KP5,
197 [ 21 ] = KEY_KP6,
198 [ 6 ] = KEY_KP7,
199 [ 10 ] = KEY_KP8,
200 [ 18 ] = KEY_KP9,
201 [ 2 ] = KEY_KP0,
202 [ 16 ] = KEY_LAST, // +100
203 [ 19 ] = KEY_LIST, // recall
204
205 [ 31 ] = KEY_CHANNELUP, // chn down
206 [ 23 ] = KEY_CHANNELDOWN, // chn up
207 [ 22 ] = KEY_VOLUMEUP, // vol down
208 [ 20 ] = KEY_VOLUMEDOWN, // vol up
209
210 [ 4 ] = KEY_KPMINUS, // <<<
211 [ 14 ] = KEY_SETUP, // function
212 [ 12 ] = KEY_KPPLUS, // >>>
213
214 [ 13 ] = KEY_GOTO, // mts
215 [ 29 ] = KEY_REFRESH, // reset
216 [ 24 ] = KEY_MUTE // mute/unmute
217};
218
219static IR_KEYTAB_TYPE ir_codes_nebula[IR_KEYTAB_SIZE] = {
220 [0x00] = KEY_KP0,
221 [0x01] = KEY_KP1,
222 [0x02] = KEY_KP2,
223 [0x03] = KEY_KP3,
224 [0x04] = KEY_KP4,
225 [0x05] = KEY_KP5,
226 [0x06] = KEY_KP6,
227 [0x07] = KEY_KP7,
228 [0x08] = KEY_KP8,
229 [0x09] = KEY_KP9,
230 [0x0a] = KEY_TV,
231 [0x0b] = KEY_AUX,
232 [0x0c] = KEY_DVD,
233 [0x0d] = KEY_POWER,
234 [0x0e] = KEY_MHP, /* labelled 'Picture' */
235 [0x0f] = KEY_AUDIO,
236 [0x10] = KEY_INFO,
237 [0x11] = KEY_F13, /* 16:9 */
238 [0x12] = KEY_F14, /* 14:9 */
239 [0x13] = KEY_EPG,
240 [0x14] = KEY_EXIT,
241 [0x15] = KEY_MENU,
242 [0x16] = KEY_UP,
243 [0x17] = KEY_DOWN,
244 [0x18] = KEY_LEFT,
245 [0x19] = KEY_RIGHT,
246 [0x1a] = KEY_ENTER,
247 [0x1b] = KEY_CHANNELUP,
248 [0x1c] = KEY_CHANNELDOWN,
249 [0x1d] = KEY_VOLUMEUP,
250 [0x1e] = KEY_VOLUMEDOWN,
251 [0x1f] = KEY_RED,
252 [0x20] = KEY_GREEN,
253 [0x21] = KEY_YELLOW,
254 [0x22] = KEY_BLUE,
255 [0x23] = KEY_SUBTITLE,
256 [0x24] = KEY_F15, /* AD */
257 [0x25] = KEY_TEXT,
258 [0x26] = KEY_MUTE,
259 [0x27] = KEY_REWIND,
260 [0x28] = KEY_STOP,
261 [0x29] = KEY_PLAY,
262 [0x2a] = KEY_FASTFORWARD,
263 [0x2b] = KEY_F16, /* chapter */
264 [0x2c] = KEY_PAUSE,
265 [0x2d] = KEY_PLAY,
266 [0x2e] = KEY_RECORD,
267 [0x2f] = KEY_F17, /* picture in picture */
268 [0x30] = KEY_KPPLUS, /* zoom in */
269 [0x31] = KEY_KPMINUS, /* zoom out */
270 [0x32] = KEY_F18, /* capture */
271 [0x33] = KEY_F19, /* web */
272 [0x34] = KEY_EMAIL,
273 [0x35] = KEY_PHONE,
274 [0x36] = KEY_PC
275};
276 31
277static int debug; 32static int debug;
278module_param(debug, int, 0644); /* debug level (0,1,2) */ 33module_param(debug, int, 0644); /* debug level (0,1,2) */
@@ -573,7 +328,8 @@ int bttv_input_init(struct bttv *btv)
573 ir->polling = 50; // ms 328 ir->polling = 50; // ms
574 break; 329 break;
575 case BTTV_BOARD_CONCEPTRONIC_CTVFMI2: 330 case BTTV_BOARD_CONCEPTRONIC_CTVFMI2:
576 ir_codes = ir_codes_conceptronic; 331 case BTTV_BOARD_CONTVFMI:
332 ir_codes = ir_codes_pixelview;
577 ir->mask_keycode = 0x001F00; 333 ir->mask_keycode = 0x001F00;
578 ir->mask_keyup = 0x006000; 334 ir->mask_keyup = 0x006000;
579 ir->polling = 50; // ms 335 ir->polling = 50; // ms
diff --git a/drivers/media/video/bttv-risc.c b/drivers/media/video/bttv-risc.c
index b40e9734bf08..344f84e9af04 100644
--- a/drivers/media/video/bttv-risc.c
+++ b/drivers/media/video/bttv-risc.c
@@ -51,8 +51,10 @@ bttv_risc_packed(struct bttv *btv, struct btcx_riscmem *risc,
51 int rc; 51 int rc;
52 52
53 /* estimate risc mem: worst case is one write per page border + 53 /* estimate risc mem: worst case is one write per page border +
54 one write per scan line + sync + jump (all 2 dwords) */ 54 one write per scan line + sync + jump (all 2 dwords). padding
55 instructions = (bpl * lines) / PAGE_SIZE + lines; 55 can cause next bpl to start close to a page border. First DMA
56 region may be smaller than PAGE_SIZE */
57 instructions = 1 + ((bpl + padding) * lines) / PAGE_SIZE + lines;
56 instructions += 2; 58 instructions += 2;
57 if ((rc = btcx_riscmem_alloc(btv->c.pci,risc,instructions*8)) < 0) 59 if ((rc = btcx_riscmem_alloc(btv->c.pci,risc,instructions*8)) < 0)
58 return rc; 60 return rc;
@@ -104,7 +106,7 @@ bttv_risc_packed(struct bttv *btv, struct btcx_riscmem *risc,
104 106
105 /* save pointer to jmp instruction address */ 107 /* save pointer to jmp instruction address */
106 risc->jmp = rp; 108 risc->jmp = rp;
107 BUG_ON((risc->jmp - risc->cpu + 2) / 4 > risc->size); 109 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
108 return 0; 110 return 0;
109} 111}
110 112
@@ -222,7 +224,7 @@ bttv_risc_planar(struct bttv *btv, struct btcx_riscmem *risc,
222 224
223 /* save pointer to jmp instruction address */ 225 /* save pointer to jmp instruction address */
224 risc->jmp = rp; 226 risc->jmp = rp;
225 BUG_ON((risc->jmp - risc->cpu + 2) / 4 > risc->size); 227 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
226 return 0; 228 return 0;
227} 229}
228 230
@@ -274,6 +276,8 @@ bttv_risc_overlay(struct bttv *btv, struct btcx_riscmem *risc,
274 if (line > maxy) 276 if (line > maxy)
275 btcx_calc_skips(line, ov->w.width, &maxy, 277 btcx_calc_skips(line, ov->w.width, &maxy,
276 skips, &nskips, ov->clips, ov->nclips); 278 skips, &nskips, ov->clips, ov->nclips);
279 else
280 nskips = 0;
277 281
278 /* write out risc code */ 282 /* write out risc code */
279 for (start = 0, skip = 0; start < ov->w.width; start = end) { 283 for (start = 0, skip = 0; start < ov->w.width; start = end) {
@@ -307,7 +311,7 @@ bttv_risc_overlay(struct bttv *btv, struct btcx_riscmem *risc,
307 311
308 /* save pointer to jmp instruction address */ 312 /* save pointer to jmp instruction address */
309 risc->jmp = rp; 313 risc->jmp = rp;
310 BUG_ON((risc->jmp - risc->cpu + 2) / 4 > risc->size); 314 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
311 kfree(skips); 315 kfree(skips);
312 return 0; 316 return 0;
313} 317}
@@ -507,8 +511,7 @@ bttv_risc_hook(struct bttv *btv, int slot, struct btcx_riscmem *risc,
507void 511void
508bttv_dma_free(struct bttv *btv, struct bttv_buffer *buf) 512bttv_dma_free(struct bttv *btv, struct bttv_buffer *buf)
509{ 513{
510 if (in_interrupt()) 514 BUG_ON(in_interrupt());
511 BUG();
512 videobuf_waiton(&buf->vb,0,0); 515 videobuf_waiton(&buf->vb,0,0);
513 videobuf_dma_pci_unmap(btv->c.pci, &buf->vb.dma); 516 videobuf_dma_pci_unmap(btv->c.pci, &buf->vb.dma);
514 videobuf_dma_free(&buf->vb.dma); 517 videobuf_dma_free(&buf->vb.dma);
diff --git a/drivers/media/video/bw-qcam.c b/drivers/media/video/bw-qcam.c
index 6bad93ef969f..d97b7d8ac33d 100644
--- a/drivers/media/video/bw-qcam.c
+++ b/drivers/media/video/bw-qcam.c
@@ -73,7 +73,7 @@ OTHER DEALINGS IN THE SOFTWARE.
73#include <linux/parport.h> 73#include <linux/parport.h>
74#include <linux/sched.h> 74#include <linux/sched.h>
75#include <linux/videodev.h> 75#include <linux/videodev.h>
76#include <asm/semaphore.h> 76#include <linux/mutex.h>
77#include <asm/uaccess.h> 77#include <asm/uaccess.h>
78 78
79#include "bw-qcam.h" 79#include "bw-qcam.h"
@@ -168,7 +168,7 @@ static struct qcam_device *qcam_init(struct parport *port)
168 168
169 memcpy(&q->vdev, &qcam_template, sizeof(qcam_template)); 169 memcpy(&q->vdev, &qcam_template, sizeof(qcam_template));
170 170
171 init_MUTEX(&q->lock); 171 mutex_init(&q->lock);
172 172
173 q->port_mode = (QC_ANY | QC_NOTSET); 173 q->port_mode = (QC_ANY | QC_NOTSET);
174 q->width = 320; 174 q->width = 320;
@@ -772,9 +772,9 @@ static int qcam_do_ioctl(struct inode *inode, struct file *file,
772 qcam->whitebal = p->whiteness>>8; 772 qcam->whitebal = p->whiteness>>8;
773 qcam->bpp = p->depth; 773 qcam->bpp = p->depth;
774 774
775 down(&qcam->lock); 775 mutex_lock(&qcam->lock);
776 qc_setscanmode(qcam); 776 qc_setscanmode(qcam);
777 up(&qcam->lock); 777 mutex_unlock(&qcam->lock);
778 qcam->status |= QC_PARAM_CHANGE; 778 qcam->status |= QC_PARAM_CHANGE;
779 779
780 return 0; 780 return 0;
@@ -805,9 +805,9 @@ static int qcam_do_ioctl(struct inode *inode, struct file *file,
805 qcam->height = 240; 805 qcam->height = 240;
806 qcam->transfer_scale = 1; 806 qcam->transfer_scale = 1;
807 } 807 }
808 down(&qcam->lock); 808 mutex_lock(&qcam->lock);
809 qc_setscanmode(qcam); 809 qc_setscanmode(qcam);
810 up(&qcam->lock); 810 mutex_unlock(&qcam->lock);
811 811
812 /* We must update the camera before we grab. We could 812 /* We must update the camera before we grab. We could
813 just have changed the grab size */ 813 just have changed the grab size */
@@ -854,7 +854,7 @@ static ssize_t qcam_read(struct file *file, char __user *buf,
854 int len; 854 int len;
855 parport_claim_or_block(qcam->pdev); 855 parport_claim_or_block(qcam->pdev);
856 856
857 down(&qcam->lock); 857 mutex_lock(&qcam->lock);
858 858
859 qc_reset(qcam); 859 qc_reset(qcam);
860 860
@@ -864,7 +864,7 @@ static ssize_t qcam_read(struct file *file, char __user *buf,
864 864
865 len=qc_capture(qcam, buf,count); 865 len=qc_capture(qcam, buf,count);
866 866
867 up(&qcam->lock); 867 mutex_unlock(&qcam->lock);
868 868
869 parport_release(qcam->pdev); 869 parport_release(qcam->pdev);
870 return len; 870 return len;
diff --git a/drivers/media/video/bw-qcam.h b/drivers/media/video/bw-qcam.h
index 723e8ad9e56a..6701dafbc0da 100644
--- a/drivers/media/video/bw-qcam.h
+++ b/drivers/media/video/bw-qcam.h
@@ -55,7 +55,7 @@ struct qcam_device {
55 struct video_device vdev; 55 struct video_device vdev;
56 struct pardevice *pdev; 56 struct pardevice *pdev;
57 struct parport *pport; 57 struct parport *pport;
58 struct semaphore lock; 58 struct mutex lock;
59 int width, height; 59 int width, height;
60 int bpp; 60 int bpp;
61 int mode; 61 int mode;
diff --git a/drivers/media/video/c-qcam.c b/drivers/media/video/c-qcam.c
index 9976db4f6da8..8211fd8d7cbf 100644
--- a/drivers/media/video/c-qcam.c
+++ b/drivers/media/video/c-qcam.c
@@ -34,7 +34,8 @@
34#include <linux/parport.h> 34#include <linux/parport.h>
35#include <linux/sched.h> 35#include <linux/sched.h>
36#include <linux/videodev.h> 36#include <linux/videodev.h>
37#include <asm/semaphore.h> 37#include <linux/mutex.h>
38
38#include <asm/uaccess.h> 39#include <asm/uaccess.h>
39 40
40struct qcam_device { 41struct qcam_device {
@@ -47,7 +48,7 @@ struct qcam_device {
47 int contrast, brightness, whitebal; 48 int contrast, brightness, whitebal;
48 int top, left; 49 int top, left;
49 unsigned int bidirectional; 50 unsigned int bidirectional;
50 struct semaphore lock; 51 struct mutex lock;
51}; 52};
52 53
53/* cameras maximum */ 54/* cameras maximum */
@@ -581,11 +582,11 @@ static int qcam_do_ioctl(struct inode *inode, struct file *file,
581 qcam->contrast = p->contrast>>8; 582 qcam->contrast = p->contrast>>8;
582 qcam->whitebal = p->whiteness>>8; 583 qcam->whitebal = p->whiteness>>8;
583 584
584 down(&qcam->lock); 585 mutex_lock(&qcam->lock);
585 parport_claim_or_block(qcam->pdev); 586 parport_claim_or_block(qcam->pdev);
586 qc_setup(qcam); 587 qc_setup(qcam);
587 parport_release(qcam->pdev); 588 parport_release(qcam->pdev);
588 up(&qcam->lock); 589 mutex_unlock(&qcam->lock);
589 return 0; 590 return 0;
590 } 591 }
591 case VIDIOCSWIN: 592 case VIDIOCSWIN:
@@ -628,11 +629,11 @@ static int qcam_do_ioctl(struct inode *inode, struct file *file,
628#endif 629#endif
629 /* Ok we figured out what to use from our 630 /* Ok we figured out what to use from our
630 wide choice */ 631 wide choice */
631 down(&qcam->lock); 632 mutex_lock(&qcam->lock);
632 parport_claim_or_block(qcam->pdev); 633 parport_claim_or_block(qcam->pdev);
633 qc_setup(qcam); 634 qc_setup(qcam);
634 parport_release(qcam->pdev); 635 parport_release(qcam->pdev);
635 up(&qcam->lock); 636 mutex_unlock(&qcam->lock);
636 return 0; 637 return 0;
637 } 638 }
638 case VIDIOCGWIN: 639 case VIDIOCGWIN:
@@ -672,12 +673,12 @@ static ssize_t qcam_read(struct file *file, char __user *buf,
672 struct qcam_device *qcam=(struct qcam_device *)v; 673 struct qcam_device *qcam=(struct qcam_device *)v;
673 int len; 674 int len;
674 675
675 down(&qcam->lock); 676 mutex_lock(&qcam->lock);
676 parport_claim_or_block(qcam->pdev); 677 parport_claim_or_block(qcam->pdev);
677 /* Probably should have a semaphore against multiple users */ 678 /* Probably should have a semaphore against multiple users */
678 len = qc_capture(qcam, buf,count); 679 len = qc_capture(qcam, buf,count);
679 parport_release(qcam->pdev); 680 parport_release(qcam->pdev);
680 up(&qcam->lock); 681 mutex_unlock(&qcam->lock);
681 return len; 682 return len;
682} 683}
683 684
@@ -727,7 +728,7 @@ static struct qcam_device *qcam_init(struct parport *port)
727 728
728 memcpy(&q->vdev, &qcam_template, sizeof(qcam_template)); 729 memcpy(&q->vdev, &qcam_template, sizeof(qcam_template));
729 730
730 init_MUTEX(&q->lock); 731 mutex_init(&q->lock);
731 q->width = q->ccd_width = 320; 732 q->width = q->ccd_width = 320;
732 q->height = q->ccd_height = 240; 733 q->height = q->ccd_height = 240;
733 q->mode = QC_MILLIONS | QC_DECIMATION_1; 734 q->mode = QC_MILLIONS | QC_DECIMATION_1;
diff --git a/drivers/media/video/cpia.c b/drivers/media/video/cpia.c
index 85d964b5b33c..d93a561e6b80 100644
--- a/drivers/media/video/cpia.c
+++ b/drivers/media/video/cpia.c
@@ -39,7 +39,7 @@
39#include <linux/pagemap.h> 39#include <linux/pagemap.h>
40#include <linux/delay.h> 40#include <linux/delay.h>
41#include <asm/io.h> 41#include <asm/io.h>
42#include <asm/semaphore.h> 42#include <linux/mutex.h>
43 43
44#ifdef CONFIG_KMOD 44#ifdef CONFIG_KMOD
45#include <linux/kmod.h> 45#include <linux/kmod.h>
@@ -622,7 +622,7 @@ static int cpia_write_proc(struct file *file, const char __user *buf,
622 622
623 buffer = page; 623 buffer = page;
624 624
625 if (down_interruptible(&cam->param_lock)) 625 if (mutex_lock_interruptible(&cam->param_lock))
626 return -ERESTARTSYS; 626 return -ERESTARTSYS;
627 627
628 /* 628 /*
@@ -1350,7 +1350,7 @@ static int cpia_write_proc(struct file *file, const char __user *buf,
1350 } else 1350 } else
1351 DBG("error: %d\n", retval); 1351 DBG("error: %d\n", retval);
1352 1352
1353 up(&cam->param_lock); 1353 mutex_unlock(&cam->param_lock);
1354 1354
1355out: 1355out:
1356 free_page((unsigned long)page); 1356 free_page((unsigned long)page);
@@ -1664,7 +1664,7 @@ static int do_command(struct cam_data *cam, u16 command, u8 a, u8 b, u8 c, u8 d)
1664 case CPIA_COMMAND_GetColourParams: 1664 case CPIA_COMMAND_GetColourParams:
1665 case CPIA_COMMAND_GetColourBalance: 1665 case CPIA_COMMAND_GetColourBalance:
1666 case CPIA_COMMAND_GetExposure: 1666 case CPIA_COMMAND_GetExposure:
1667 down(&cam->param_lock); 1667 mutex_lock(&cam->param_lock);
1668 datasize=8; 1668 datasize=8;
1669 break; 1669 break;
1670 case CPIA_COMMAND_ReadMCPorts: 1670 case CPIA_COMMAND_ReadMCPorts:
@@ -1691,7 +1691,7 @@ static int do_command(struct cam_data *cam, u16 command, u8 a, u8 b, u8 c, u8 d)
1691 if (command == CPIA_COMMAND_GetColourParams || 1691 if (command == CPIA_COMMAND_GetColourParams ||
1692 command == CPIA_COMMAND_GetColourBalance || 1692 command == CPIA_COMMAND_GetColourBalance ||
1693 command == CPIA_COMMAND_GetExposure) 1693 command == CPIA_COMMAND_GetExposure)
1694 up(&cam->param_lock); 1694 mutex_unlock(&cam->param_lock);
1695 } else { 1695 } else {
1696 switch(command) { 1696 switch(command) {
1697 case CPIA_COMMAND_GetCPIAVersion: 1697 case CPIA_COMMAND_GetCPIAVersion:
@@ -1726,13 +1726,13 @@ static int do_command(struct cam_data *cam, u16 command, u8 a, u8 b, u8 c, u8 d)
1726 cam->params.colourParams.brightness = data[0]; 1726 cam->params.colourParams.brightness = data[0];
1727 cam->params.colourParams.contrast = data[1]; 1727 cam->params.colourParams.contrast = data[1];
1728 cam->params.colourParams.saturation = data[2]; 1728 cam->params.colourParams.saturation = data[2];
1729 up(&cam->param_lock); 1729 mutex_unlock(&cam->param_lock);
1730 break; 1730 break;
1731 case CPIA_COMMAND_GetColourBalance: 1731 case CPIA_COMMAND_GetColourBalance:
1732 cam->params.colourBalance.redGain = data[0]; 1732 cam->params.colourBalance.redGain = data[0];
1733 cam->params.colourBalance.greenGain = data[1]; 1733 cam->params.colourBalance.greenGain = data[1];
1734 cam->params.colourBalance.blueGain = data[2]; 1734 cam->params.colourBalance.blueGain = data[2];
1735 up(&cam->param_lock); 1735 mutex_unlock(&cam->param_lock);
1736 break; 1736 break;
1737 case CPIA_COMMAND_GetExposure: 1737 case CPIA_COMMAND_GetExposure:
1738 cam->params.exposure.gain = data[0]; 1738 cam->params.exposure.gain = data[0];
@@ -1743,7 +1743,7 @@ static int do_command(struct cam_data *cam, u16 command, u8 a, u8 b, u8 c, u8 d)
1743 cam->params.exposure.green1Comp = data[5]; 1743 cam->params.exposure.green1Comp = data[5];
1744 cam->params.exposure.green2Comp = data[6]; 1744 cam->params.exposure.green2Comp = data[6];
1745 cam->params.exposure.blueComp = data[7]; 1745 cam->params.exposure.blueComp = data[7];
1746 up(&cam->param_lock); 1746 mutex_unlock(&cam->param_lock);
1747 break; 1747 break;
1748 1748
1749 case CPIA_COMMAND_ReadMCPorts: 1749 case CPIA_COMMAND_ReadMCPorts:
@@ -2059,7 +2059,7 @@ static int parse_picture(struct cam_data *cam, int size)
2059 int rows, cols, linesize, subsample_422; 2059 int rows, cols, linesize, subsample_422;
2060 2060
2061 /* make sure params don't change while we are decoding */ 2061 /* make sure params don't change while we are decoding */
2062 down(&cam->param_lock); 2062 mutex_lock(&cam->param_lock);
2063 2063
2064 obuf = cam->decompressed_frame.data; 2064 obuf = cam->decompressed_frame.data;
2065 end_obuf = obuf+CPIA_MAX_FRAME_SIZE; 2065 end_obuf = obuf+CPIA_MAX_FRAME_SIZE;
@@ -2069,26 +2069,26 @@ static int parse_picture(struct cam_data *cam, int size)
2069 2069
2070 if ((ibuf[0] != MAGIC_0) || (ibuf[1] != MAGIC_1)) { 2070 if ((ibuf[0] != MAGIC_0) || (ibuf[1] != MAGIC_1)) {
2071 LOG("header not found\n"); 2071 LOG("header not found\n");
2072 up(&cam->param_lock); 2072 mutex_unlock(&cam->param_lock);
2073 return -1; 2073 return -1;
2074 } 2074 }
2075 2075
2076 if ((ibuf[16] != VIDEOSIZE_QCIF) && (ibuf[16] != VIDEOSIZE_CIF)) { 2076 if ((ibuf[16] != VIDEOSIZE_QCIF) && (ibuf[16] != VIDEOSIZE_CIF)) {
2077 LOG("wrong video size\n"); 2077 LOG("wrong video size\n");
2078 up(&cam->param_lock); 2078 mutex_unlock(&cam->param_lock);
2079 return -1; 2079 return -1;
2080 } 2080 }
2081 2081
2082 if (ibuf[17] != SUBSAMPLE_420 && ibuf[17] != SUBSAMPLE_422) { 2082 if (ibuf[17] != SUBSAMPLE_420 && ibuf[17] != SUBSAMPLE_422) {
2083 LOG("illegal subtype %d\n",ibuf[17]); 2083 LOG("illegal subtype %d\n",ibuf[17]);
2084 up(&cam->param_lock); 2084 mutex_unlock(&cam->param_lock);
2085 return -1; 2085 return -1;
2086 } 2086 }
2087 subsample_422 = ibuf[17] == SUBSAMPLE_422; 2087 subsample_422 = ibuf[17] == SUBSAMPLE_422;
2088 2088
2089 if (ibuf[18] != YUVORDER_YUYV && ibuf[18] != YUVORDER_UYVY) { 2089 if (ibuf[18] != YUVORDER_YUYV && ibuf[18] != YUVORDER_UYVY) {
2090 LOG("illegal yuvorder %d\n",ibuf[18]); 2090 LOG("illegal yuvorder %d\n",ibuf[18]);
2091 up(&cam->param_lock); 2091 mutex_unlock(&cam->param_lock);
2092 return -1; 2092 return -1;
2093 } 2093 }
2094 in_uyvy = ibuf[18] == YUVORDER_UYVY; 2094 in_uyvy = ibuf[18] == YUVORDER_UYVY;
@@ -2098,7 +2098,7 @@ static int parse_picture(struct cam_data *cam, int size)
2098 (ibuf[26] != cam->params.roi.rowStart) || 2098 (ibuf[26] != cam->params.roi.rowStart) ||
2099 (ibuf[27] != cam->params.roi.rowEnd)) { 2099 (ibuf[27] != cam->params.roi.rowEnd)) {
2100 LOG("ROI mismatch\n"); 2100 LOG("ROI mismatch\n");
2101 up(&cam->param_lock); 2101 mutex_unlock(&cam->param_lock);
2102 return -1; 2102 return -1;
2103 } 2103 }
2104 cols = 8*(ibuf[25] - ibuf[24]); 2104 cols = 8*(ibuf[25] - ibuf[24]);
@@ -2107,14 +2107,14 @@ static int parse_picture(struct cam_data *cam, int size)
2107 2107
2108 if ((ibuf[28] != NOT_COMPRESSED) && (ibuf[28] != COMPRESSED)) { 2108 if ((ibuf[28] != NOT_COMPRESSED) && (ibuf[28] != COMPRESSED)) {
2109 LOG("illegal compression %d\n",ibuf[28]); 2109 LOG("illegal compression %d\n",ibuf[28]);
2110 up(&cam->param_lock); 2110 mutex_unlock(&cam->param_lock);
2111 return -1; 2111 return -1;
2112 } 2112 }
2113 compressed = (ibuf[28] == COMPRESSED); 2113 compressed = (ibuf[28] == COMPRESSED);
2114 2114
2115 if (ibuf[29] != NO_DECIMATION && ibuf[29] != DECIMATION_ENAB) { 2115 if (ibuf[29] != NO_DECIMATION && ibuf[29] != DECIMATION_ENAB) {
2116 LOG("illegal decimation %d\n",ibuf[29]); 2116 LOG("illegal decimation %d\n",ibuf[29]);
2117 up(&cam->param_lock); 2117 mutex_unlock(&cam->param_lock);
2118 return -1; 2118 return -1;
2119 } 2119 }
2120 decimation = (ibuf[29] == DECIMATION_ENAB); 2120 decimation = (ibuf[29] == DECIMATION_ENAB);
@@ -2130,7 +2130,7 @@ static int parse_picture(struct cam_data *cam, int size)
2130 cam->params.status.vpStatus = ibuf[38]; 2130 cam->params.status.vpStatus = ibuf[38];
2131 cam->params.status.errorCode = ibuf[39]; 2131 cam->params.status.errorCode = ibuf[39];
2132 cam->fps = ibuf[41]; 2132 cam->fps = ibuf[41];
2133 up(&cam->param_lock); 2133 mutex_unlock(&cam->param_lock);
2134 2134
2135 linesize = skipcount(cols, out_fmt); 2135 linesize = skipcount(cols, out_fmt);
2136 ibuf += FRAME_HEADER_SIZE; 2136 ibuf += FRAME_HEADER_SIZE;
@@ -2271,9 +2271,9 @@ static int find_over_exposure(int brightness)
2271/* update various camera modes and settings */ 2271/* update various camera modes and settings */
2272static void dispatch_commands(struct cam_data *cam) 2272static void dispatch_commands(struct cam_data *cam)
2273{ 2273{
2274 down(&cam->param_lock); 2274 mutex_lock(&cam->param_lock);
2275 if (cam->cmd_queue==COMMAND_NONE) { 2275 if (cam->cmd_queue==COMMAND_NONE) {
2276 up(&cam->param_lock); 2276 mutex_unlock(&cam->param_lock);
2277 return; 2277 return;
2278 } 2278 }
2279 DEB_BYTE(cam->cmd_queue); 2279 DEB_BYTE(cam->cmd_queue);
@@ -2415,7 +2415,7 @@ static void dispatch_commands(struct cam_data *cam)
2415 } 2415 }
2416 2416
2417 cam->cmd_queue = COMMAND_NONE; 2417 cam->cmd_queue = COMMAND_NONE;
2418 up(&cam->param_lock); 2418 mutex_unlock(&cam->param_lock);
2419 return; 2419 return;
2420} 2420}
2421 2421
@@ -2562,7 +2562,7 @@ static void monitor_exposure(struct cam_data *cam)
2562 gain = data[2]; 2562 gain = data[2];
2563 coarseL = data[3]; 2563 coarseL = data[3];
2564 2564
2565 down(&cam->param_lock); 2565 mutex_lock(&cam->param_lock);
2566 light_exp = cam->params.colourParams.brightness + 2566 light_exp = cam->params.colourParams.brightness +
2567 TC - 50 + EXP_ACC_LIGHT; 2567 TC - 50 + EXP_ACC_LIGHT;
2568 if(light_exp > 255) 2568 if(light_exp > 255)
@@ -2762,7 +2762,7 @@ static void monitor_exposure(struct cam_data *cam)
2762 LOG("Automatically increasing sensor_fps\n"); 2762 LOG("Automatically increasing sensor_fps\n");
2763 } 2763 }
2764 } 2764 }
2765 up(&cam->param_lock); 2765 mutex_unlock(&cam->param_lock);
2766} 2766}
2767 2767
2768/*-----------------------------------------------------------------*/ 2768/*-----------------------------------------------------------------*/
@@ -2778,10 +2778,10 @@ static void restart_flicker(struct cam_data *cam)
2778 int cam_exposure, old_exp; 2778 int cam_exposure, old_exp;
2779 if(!FIRMWARE_VERSION(1,2)) 2779 if(!FIRMWARE_VERSION(1,2))
2780 return; 2780 return;
2781 down(&cam->param_lock); 2781 mutex_lock(&cam->param_lock);
2782 if(cam->params.flickerControl.flickerMode == 0 || 2782 if(cam->params.flickerControl.flickerMode == 0 ||
2783 cam->raw_image[39] == 0) { 2783 cam->raw_image[39] == 0) {
2784 up(&cam->param_lock); 2784 mutex_unlock(&cam->param_lock);
2785 return; 2785 return;
2786 } 2786 }
2787 cam_exposure = cam->raw_image[39]*2; 2787 cam_exposure = cam->raw_image[39]*2;
@@ -2810,7 +2810,7 @@ static void restart_flicker(struct cam_data *cam)
2810 cam->exposure_status = EXPOSURE_NORMAL; 2810 cam->exposure_status = EXPOSURE_NORMAL;
2811 2811
2812 } 2812 }
2813 up(&cam->param_lock); 2813 mutex_unlock(&cam->param_lock);
2814} 2814}
2815#undef FIRMWARE_VERSION 2815#undef FIRMWARE_VERSION
2816 2816
@@ -3186,7 +3186,7 @@ static int cpia_open(struct inode *inode, struct file *file)
3186 if (!try_module_get(cam->ops->owner)) 3186 if (!try_module_get(cam->ops->owner))
3187 return -ENODEV; 3187 return -ENODEV;
3188 3188
3189 down(&cam->busy_lock); 3189 mutex_lock(&cam->busy_lock);
3190 err = -ENOMEM; 3190 err = -ENOMEM;
3191 if (!cam->raw_image) { 3191 if (!cam->raw_image) {
3192 cam->raw_image = rvmalloc(CPIA_MAX_IMAGE_SIZE); 3192 cam->raw_image = rvmalloc(CPIA_MAX_IMAGE_SIZE);
@@ -3227,7 +3227,7 @@ static int cpia_open(struct inode *inode, struct file *file)
3227 3227
3228 ++cam->open_count; 3228 ++cam->open_count;
3229 file->private_data = dev; 3229 file->private_data = dev;
3230 up(&cam->busy_lock); 3230 mutex_unlock(&cam->busy_lock);
3231 return 0; 3231 return 0;
3232 3232
3233 oops: 3233 oops:
@@ -3239,7 +3239,7 @@ static int cpia_open(struct inode *inode, struct file *file)
3239 rvfree(cam->raw_image, CPIA_MAX_IMAGE_SIZE); 3239 rvfree(cam->raw_image, CPIA_MAX_IMAGE_SIZE);
3240 cam->raw_image = NULL; 3240 cam->raw_image = NULL;
3241 } 3241 }
3242 up(&cam->busy_lock); 3242 mutex_unlock(&cam->busy_lock);
3243 put_cam(cam->ops); 3243 put_cam(cam->ops);
3244 return err; 3244 return err;
3245} 3245}
@@ -3303,24 +3303,24 @@ static ssize_t cpia_read(struct file *file, char __user *buf,
3303 int err; 3303 int err;
3304 3304
3305 /* make this _really_ smp and multithread-safe */ 3305 /* make this _really_ smp and multithread-safe */
3306 if (down_interruptible(&cam->busy_lock)) 3306 if (mutex_lock_interruptible(&cam->busy_lock))
3307 return -EINTR; 3307 return -EINTR;
3308 3308
3309 if (!buf) { 3309 if (!buf) {
3310 DBG("buf NULL\n"); 3310 DBG("buf NULL\n");
3311 up(&cam->busy_lock); 3311 mutex_unlock(&cam->busy_lock);
3312 return -EINVAL; 3312 return -EINVAL;
3313 } 3313 }
3314 3314
3315 if (!count) { 3315 if (!count) {
3316 DBG("count 0\n"); 3316 DBG("count 0\n");
3317 up(&cam->busy_lock); 3317 mutex_unlock(&cam->busy_lock);
3318 return 0; 3318 return 0;
3319 } 3319 }
3320 3320
3321 if (!cam->ops) { 3321 if (!cam->ops) {
3322 DBG("ops NULL\n"); 3322 DBG("ops NULL\n");
3323 up(&cam->busy_lock); 3323 mutex_unlock(&cam->busy_lock);
3324 return -ENODEV; 3324 return -ENODEV;
3325 } 3325 }
3326 3326
@@ -3329,7 +3329,7 @@ static ssize_t cpia_read(struct file *file, char __user *buf,
3329 cam->mmap_kludge=0; 3329 cam->mmap_kludge=0;
3330 if((err = fetch_frame(cam)) != 0) { 3330 if((err = fetch_frame(cam)) != 0) {
3331 DBG("ERROR from fetch_frame: %d\n", err); 3331 DBG("ERROR from fetch_frame: %d\n", err);
3332 up(&cam->busy_lock); 3332 mutex_unlock(&cam->busy_lock);
3333 return err; 3333 return err;
3334 } 3334 }
3335 cam->decompressed_frame.state = FRAME_UNUSED; 3335 cam->decompressed_frame.state = FRAME_UNUSED;
@@ -3338,17 +3338,17 @@ static ssize_t cpia_read(struct file *file, char __user *buf,
3338 if (cam->decompressed_frame.count > count) { 3338 if (cam->decompressed_frame.count > count) {
3339 DBG("count wrong: %d, %lu\n", cam->decompressed_frame.count, 3339 DBG("count wrong: %d, %lu\n", cam->decompressed_frame.count,
3340 (unsigned long) count); 3340 (unsigned long) count);
3341 up(&cam->busy_lock); 3341 mutex_unlock(&cam->busy_lock);
3342 return -EFAULT; 3342 return -EFAULT;
3343 } 3343 }
3344 if (copy_to_user(buf, cam->decompressed_frame.data, 3344 if (copy_to_user(buf, cam->decompressed_frame.data,
3345 cam->decompressed_frame.count)) { 3345 cam->decompressed_frame.count)) {
3346 DBG("copy_to_user failed\n"); 3346 DBG("copy_to_user failed\n");
3347 up(&cam->busy_lock); 3347 mutex_unlock(&cam->busy_lock);
3348 return -EFAULT; 3348 return -EFAULT;
3349 } 3349 }
3350 3350
3351 up(&cam->busy_lock); 3351 mutex_unlock(&cam->busy_lock);
3352 return cam->decompressed_frame.count; 3352 return cam->decompressed_frame.count;
3353} 3353}
3354 3354
@@ -3363,7 +3363,7 @@ static int cpia_do_ioctl(struct inode *inode, struct file *file,
3363 return -ENODEV; 3363 return -ENODEV;
3364 3364
3365 /* make this _really_ smp-safe */ 3365 /* make this _really_ smp-safe */
3366 if (down_interruptible(&cam->busy_lock)) 3366 if (mutex_lock_interruptible(&cam->busy_lock))
3367 return -EINTR; 3367 return -EINTR;
3368 3368
3369 //DBG("cpia_ioctl: %u\n", ioctlnr); 3369 //DBG("cpia_ioctl: %u\n", ioctlnr);
@@ -3439,7 +3439,7 @@ static int cpia_do_ioctl(struct inode *inode, struct file *file,
3439 break; 3439 break;
3440 } 3440 }
3441 3441
3442 down(&cam->param_lock); 3442 mutex_lock(&cam->param_lock);
3443 /* brightness, colour, contrast need no check 0-65535 */ 3443 /* brightness, colour, contrast need no check 0-65535 */
3444 cam->vp = *vp; 3444 cam->vp = *vp;
3445 /* update cam->params.colourParams */ 3445 /* update cam->params.colourParams */
@@ -3466,7 +3466,7 @@ static int cpia_do_ioctl(struct inode *inode, struct file *file,
3466 3466
3467 /* queue command to update camera */ 3467 /* queue command to update camera */
3468 cam->cmd_queue |= COMMAND_SETCOLOURPARAMS; 3468 cam->cmd_queue |= COMMAND_SETCOLOURPARAMS;
3469 up(&cam->param_lock); 3469 mutex_unlock(&cam->param_lock);
3470 DBG("VIDIOCSPICT: %d / %d // %d / %d / %d / %d\n", 3470 DBG("VIDIOCSPICT: %d / %d // %d / %d / %d / %d\n",
3471 vp->depth, vp->palette, vp->brightness, vp->hue, vp->colour, 3471 vp->depth, vp->palette, vp->brightness, vp->hue, vp->colour,
3472 vp->contrast); 3472 vp->contrast);
@@ -3501,13 +3501,13 @@ static int cpia_do_ioctl(struct inode *inode, struct file *file,
3501 /* we set the video window to something smaller or equal to what 3501 /* we set the video window to something smaller or equal to what
3502 * is requested by the user??? 3502 * is requested by the user???
3503 */ 3503 */
3504 down(&cam->param_lock); 3504 mutex_lock(&cam->param_lock);
3505 if (vw->width != cam->vw.width || vw->height != cam->vw.height) { 3505 if (vw->width != cam->vw.width || vw->height != cam->vw.height) {
3506 int video_size = match_videosize(vw->width, vw->height); 3506 int video_size = match_videosize(vw->width, vw->height);
3507 3507
3508 if (video_size < 0) { 3508 if (video_size < 0) {
3509 retval = -EINVAL; 3509 retval = -EINVAL;
3510 up(&cam->param_lock); 3510 mutex_unlock(&cam->param_lock);
3511 break; 3511 break;
3512 } 3512 }
3513 cam->video_size = video_size; 3513 cam->video_size = video_size;
@@ -3520,7 +3520,7 @@ static int cpia_do_ioctl(struct inode *inode, struct file *file,
3520 cam->cmd_queue |= COMMAND_SETFORMAT; 3520 cam->cmd_queue |= COMMAND_SETFORMAT;
3521 } 3521 }
3522 3522
3523 up(&cam->param_lock); 3523 mutex_unlock(&cam->param_lock);
3524 3524
3525 /* setformat ignored by camera during streaming, 3525 /* setformat ignored by camera during streaming,
3526 * so stop/dispatch/start */ 3526 * so stop/dispatch/start */
@@ -3682,7 +3682,7 @@ static int cpia_do_ioctl(struct inode *inode, struct file *file,
3682 3682
3683 DBG("%d,%d/%dx%d\n", vc->x,vc->y,vc->width, vc->height); 3683 DBG("%d,%d/%dx%d\n", vc->x,vc->y,vc->width, vc->height);
3684 3684
3685 down(&cam->param_lock); 3685 mutex_lock(&cam->param_lock);
3686 3686
3687 cam->vc.x = vc->x; 3687 cam->vc.x = vc->x;
3688 cam->vc.y = vc->y; 3688 cam->vc.y = vc->y;
@@ -3692,7 +3692,7 @@ static int cpia_do_ioctl(struct inode *inode, struct file *file,
3692 set_vw_size(cam); 3692 set_vw_size(cam);
3693 cam->cmd_queue |= COMMAND_SETFORMAT; 3693 cam->cmd_queue |= COMMAND_SETFORMAT;
3694 3694
3695 up(&cam->param_lock); 3695 mutex_unlock(&cam->param_lock);
3696 3696
3697 /* setformat ignored by camera during streaming, 3697 /* setformat ignored by camera during streaming,
3698 * so stop/dispatch/start */ 3698 * so stop/dispatch/start */
@@ -3736,7 +3736,7 @@ static int cpia_do_ioctl(struct inode *inode, struct file *file,
3736 break; 3736 break;
3737 } 3737 }
3738 3738
3739 up(&cam->busy_lock); 3739 mutex_unlock(&cam->busy_lock);
3740 return retval; 3740 return retval;
3741} 3741}
3742 3742
@@ -3769,12 +3769,12 @@ static int cpia_mmap(struct file *file, struct vm_area_struct *vma)
3769 return -ENODEV; 3769 return -ENODEV;
3770 3770
3771 /* make this _really_ smp-safe */ 3771 /* make this _really_ smp-safe */
3772 if (down_interruptible(&cam->busy_lock)) 3772 if (mutex_lock_interruptible(&cam->busy_lock))
3773 return -EINTR; 3773 return -EINTR;
3774 3774
3775 if (!cam->frame_buf) { /* we do lazy allocation */ 3775 if (!cam->frame_buf) { /* we do lazy allocation */
3776 if ((retval = allocate_frame_buf(cam))) { 3776 if ((retval = allocate_frame_buf(cam))) {
3777 up(&cam->busy_lock); 3777 mutex_unlock(&cam->busy_lock);
3778 return retval; 3778 return retval;
3779 } 3779 }
3780 } 3780 }
@@ -3783,7 +3783,7 @@ static int cpia_mmap(struct file *file, struct vm_area_struct *vma)
3783 while (size > 0) { 3783 while (size > 0) {
3784 page = vmalloc_to_pfn((void *)pos); 3784 page = vmalloc_to_pfn((void *)pos);
3785 if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED)) { 3785 if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED)) {
3786 up(&cam->busy_lock); 3786 mutex_unlock(&cam->busy_lock);
3787 return -EAGAIN; 3787 return -EAGAIN;
3788 } 3788 }
3789 start += PAGE_SIZE; 3789 start += PAGE_SIZE;
@@ -3795,7 +3795,7 @@ static int cpia_mmap(struct file *file, struct vm_area_struct *vma)
3795 } 3795 }
3796 3796
3797 DBG("cpia_mmap: %ld\n", size); 3797 DBG("cpia_mmap: %ld\n", size);
3798 up(&cam->busy_lock); 3798 mutex_unlock(&cam->busy_lock);
3799 3799
3800 return 0; 3800 return 0;
3801} 3801}
@@ -3936,8 +3936,8 @@ static void init_camera_struct(struct cam_data *cam,
3936 memset(cam, 0, sizeof(struct cam_data)); 3936 memset(cam, 0, sizeof(struct cam_data));
3937 3937
3938 cam->ops = ops; 3938 cam->ops = ops;
3939 init_MUTEX(&cam->param_lock); 3939 mutex_init(&cam->param_lock);
3940 init_MUTEX(&cam->busy_lock); 3940 mutex_init(&cam->busy_lock);
3941 3941
3942 reset_camera_struct(cam); 3942 reset_camera_struct(cam);
3943 3943
diff --git a/drivers/media/video/cpia.h b/drivers/media/video/cpia.h
index f629b693ee65..de6678200a57 100644
--- a/drivers/media/video/cpia.h
+++ b/drivers/media/video/cpia.h
@@ -47,6 +47,7 @@
 #include <linux/videodev.h>
 #include <linux/list.h>
 #include <linux/smp_lock.h>
+#include <linux/mutex.h>
 
 struct cpia_camera_ops
 {
@@ -246,7 +247,7 @@ enum v4l_camstates {
 struct cam_data {
 	struct list_head cam_data_list;
 
-	struct semaphore busy_lock;	/* guard against SMP multithreading */
+	struct mutex busy_lock;		/* guard against SMP multithreading */
 	struct cpia_camera_ops *ops;	/* lowlevel driver operations */
 	void *lowlevel_data;		/* private data for lowlevel driver */
 	u8 *raw_image;			/* buffer for raw image data */
@@ -261,7 +262,7 @@ struct cam_data {
 	u8 mainsFreq;			/* for flicker control */
 
 	/* proc interface */
-	struct semaphore param_lock;	/* params lock for this camera */
+	struct mutex param_lock;	/* params lock for this camera */
 	struct cam_params params;	/* camera settings */
 	struct proc_dir_entry *proc_entry;	/* /proc/cpia/videoX */
 
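/*
 * Hedged sketch (editor's illustration, not part of the patch): the
 * semaphore-to-mutex conversion applied in cpia.c/cpia.h above.  Each old
 * semaphore-as-mutex call maps one-to-one onto the mutex API.
 */
static struct mutex example_lock;			/* was: struct semaphore */

static void example_setup(void)
{
	mutex_init(&example_lock);			/* was: init_MUTEX() */
}

static int example_locked_op(void)
{
	if (mutex_lock_interruptible(&example_lock))	/* was: down_interruptible() */
		return -EINTR;
	/* ... critical section ... */
	mutex_unlock(&example_lock);			/* was: up() */
	return 0;
}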
diff --git a/drivers/media/video/cpia2/Kconfig b/drivers/media/video/cpia2/Kconfig
new file mode 100644
index 000000000000..513cc0927389
--- /dev/null
+++ b/drivers/media/video/cpia2/Kconfig
@@ -0,0 +1,9 @@
1config VIDEO_CPIA2
2 tristate "CPiA2 Video For Linux"
3 depends on VIDEO_DEV && USB
4 ---help---
5 This is the video4linux driver for cameras based on Vision's CPiA2
6 (Colour Processor Interface ASIC), such as the Digital Blue QX5
 7	  Microscope. If you have one of these cameras, say Y here.
8
9 This driver is also available as a module (cpia2).
diff --git a/drivers/media/video/cpia2/Makefile b/drivers/media/video/cpia2/Makefile
new file mode 100644
index 000000000000..828cf1b1df86
--- /dev/null
+++ b/drivers/media/video/cpia2/Makefile
@@ -0,0 +1,3 @@
1cpia2-objs := cpia2_v4l.o cpia2_usb.o cpia2_core.o
2
3obj-$(CONFIG_VIDEO_CPIA2) += cpia2.o
diff --git a/drivers/media/video/cpia2/cpia2.h b/drivers/media/video/cpia2/cpia2.h
new file mode 100644
index 000000000000..95d3afa94a3d
--- /dev/null
+++ b/drivers/media/video/cpia2/cpia2.h
@@ -0,0 +1,497 @@
1/****************************************************************************
2 *
3 * Filename: cpia2.h
4 *
 5 * Copyright 2001, STMicroelectronics, Inc.
6 *
7 * Contact: steve.miller@st.com
8 *
9 * Description:
10 * This is a USB driver for CPiA2 based video cameras.
11 *
12 * This driver is modelled on the cpia usb driver by
13 * Jochen Scharrlach and Johannes Erdfeldt.
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2 of the License, or
18 * (at your option) any later version.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
28 *
29 ****************************************************************************/
30
31#ifndef __CPIA2_H__
32#define __CPIA2_H__
33
34#include <linux/version.h>
35#include <linux/videodev.h>
36#include <linux/usb.h>
37#include <linux/poll.h>
38
39#include "cpia2dev.h"
40#include "cpia2_registers.h"
41
42/* define for verbose debug output */
43//#define _CPIA2_DEBUG_
44
45#define CPIA2_MAJ_VER 2
46#define CPIA2_MIN_VER 0
47#define CPIA2_PATCH_VER 0
48
49/***
50 * Image defines
51 ***/
52#ifndef true
53#define true 1
54#define false 0
55#endif
56
57/* Misc constants */
 58#define ALLOW_CORRUPT 0 /* Causes collator to discard checksum */
59
60/* USB Transfer mode */
61#define XFER_ISOC 0
62#define XFER_BULK 1
63
64/* USB Alternates */
65#define USBIF_CMDONLY 0
66#define USBIF_BULK 1
67#define USBIF_ISO_1 2 /* 128 bytes/ms */
68#define USBIF_ISO_2 3 /* 384 bytes/ms */
69#define USBIF_ISO_3 4 /* 640 bytes/ms */
70#define USBIF_ISO_4 5 /* 768 bytes/ms */
71#define USBIF_ISO_5 6 /* 896 bytes/ms */
72#define USBIF_ISO_6 7 /* 1023 bytes/ms */
73
74/* Flicker Modes */
75#define NEVER_FLICKER 0
76#define ANTI_FLICKER_ON 1
77#define FLICKER_60 60
78#define FLICKER_50 50
79
80/* Debug flags */
81#define DEBUG_NONE 0
82#define DEBUG_REG 0x00000001
83#define DEBUG_DUMP_PATCH 0x00000002
84#define DEBUG_DUMP_REGS 0x00000004
85
86/***
87 * Video frame sizes
88 ***/
89enum {
90 VIDEOSIZE_VGA = 0, /* 640x480 */
91 VIDEOSIZE_CIF, /* 352x288 */
92 VIDEOSIZE_QVGA, /* 320x240 */
93 VIDEOSIZE_QCIF, /* 176x144 */
94 VIDEOSIZE_288_216,
95 VIDEOSIZE_256_192,
96 VIDEOSIZE_224_168,
97 VIDEOSIZE_192_144,
98};
99
100#define STV_IMAGE_CIF_ROWS 288
101#define STV_IMAGE_CIF_COLS 352
102
103#define STV_IMAGE_QCIF_ROWS 144
104#define STV_IMAGE_QCIF_COLS 176
105
106#define STV_IMAGE_VGA_ROWS 480
107#define STV_IMAGE_VGA_COLS 640
108
109#define STV_IMAGE_QVGA_ROWS 240
110#define STV_IMAGE_QVGA_COLS 320
111
112#define JPEG_MARKER_COM (1<<6) /* Comment segment */
113
114/***
115 * Enums
116 ***/
117/* Sensor types available with cpia2 asics */
118enum sensors {
119 CPIA2_SENSOR_410,
120 CPIA2_SENSOR_500
121};
122
123/* Asic types available in the CPiA2 architecture */
124#define CPIA2_ASIC_672 0x67
125
126/* Device types (stv672, stv676, etc) */
127#define DEVICE_STV_672 0x0001
128#define DEVICE_STV_676 0x0002
129
130enum frame_status {
131 FRAME_EMPTY,
132 FRAME_READING, /* In the process of being grabbed into */
133 FRAME_READY, /* Ready to be read */
134 FRAME_ERROR,
135};
136
137/***
138 * Register access (for USB request byte)
139 ***/
140enum {
141 CAMERAACCESS_SYSTEM = 0,
142 CAMERAACCESS_VC,
143 CAMERAACCESS_VP,
144 CAMERAACCESS_IDATA
145};
146
147#define CAMERAACCESS_TYPE_BLOCK 0x00
148#define CAMERAACCESS_TYPE_RANDOM 0x04
149#define CAMERAACCESS_TYPE_MASK 0x08
150#define CAMERAACCESS_TYPE_REPEAT 0x0C
151
152#define TRANSFER_READ 0
153#define TRANSFER_WRITE 1
154
155#define DEFAULT_ALT USBIF_ISO_6
156#define DEFAULT_BRIGHTNESS 0x46
157#define DEFAULT_CONTRAST 0x93
158#define DEFAULT_SATURATION 0x7f
159#define DEFAULT_TARGET_KB 0x30
160
161/* Power state */
162#define HI_POWER_MODE CPIA2_SYSTEM_CONTROL_HIGH_POWER
163#define LO_POWER_MODE CPIA2_SYSTEM_CONTROL_LOW_POWER
164
165
166/********
167 * Commands
168 *******/
169enum {
170 CPIA2_CMD_NONE = 0,
171 CPIA2_CMD_GET_VERSION,
172 CPIA2_CMD_GET_PNP_ID,
173 CPIA2_CMD_GET_ASIC_TYPE,
174 CPIA2_CMD_GET_SENSOR,
175 CPIA2_CMD_GET_VP_DEVICE,
176 CPIA2_CMD_GET_VP_BRIGHTNESS,
177 CPIA2_CMD_SET_VP_BRIGHTNESS,
178 CPIA2_CMD_GET_CONTRAST,
179 CPIA2_CMD_SET_CONTRAST,
180 CPIA2_CMD_GET_VP_SATURATION,
181 CPIA2_CMD_SET_VP_SATURATION,
182 CPIA2_CMD_GET_VP_GPIO_DIRECTION,
183 CPIA2_CMD_SET_VP_GPIO_DIRECTION,
184 CPIA2_CMD_GET_VP_GPIO_DATA,
185 CPIA2_CMD_SET_VP_GPIO_DATA,
186 CPIA2_CMD_GET_VC_MP_GPIO_DIRECTION,
187 CPIA2_CMD_SET_VC_MP_GPIO_DIRECTION,
188 CPIA2_CMD_GET_VC_MP_GPIO_DATA,
189 CPIA2_CMD_SET_VC_MP_GPIO_DATA,
190 CPIA2_CMD_ENABLE_PACKET_CTRL,
191 CPIA2_CMD_GET_FLICKER_MODES,
192 CPIA2_CMD_SET_FLICKER_MODES,
193 CPIA2_CMD_RESET_FIFO, /* clear fifo and enable stream block */
194 CPIA2_CMD_SET_HI_POWER,
195 CPIA2_CMD_SET_LOW_POWER,
196 CPIA2_CMD_CLEAR_V2W_ERR,
197 CPIA2_CMD_SET_USER_MODE,
198 CPIA2_CMD_GET_USER_MODE,
199 CPIA2_CMD_FRAMERATE_REQ,
200 CPIA2_CMD_SET_COMPRESSION_STATE,
201 CPIA2_CMD_GET_WAKEUP,
202 CPIA2_CMD_SET_WAKEUP,
203 CPIA2_CMD_GET_PW_CONTROL,
204 CPIA2_CMD_SET_PW_CONTROL,
205 CPIA2_CMD_GET_SYSTEM_CTRL,
206 CPIA2_CMD_SET_SYSTEM_CTRL,
207 CPIA2_CMD_GET_VP_SYSTEM_STATE,
208 CPIA2_CMD_GET_VP_SYSTEM_CTRL,
209 CPIA2_CMD_SET_VP_SYSTEM_CTRL,
210 CPIA2_CMD_GET_VP_EXP_MODES,
211 CPIA2_CMD_SET_VP_EXP_MODES,
212 CPIA2_CMD_GET_DEVICE_CONFIG,
213 CPIA2_CMD_SET_DEVICE_CONFIG,
214 CPIA2_CMD_SET_SERIAL_ADDR,
215 CPIA2_CMD_SET_SENSOR_CR1,
216 CPIA2_CMD_GET_VC_CONTROL,
217 CPIA2_CMD_SET_VC_CONTROL,
218 CPIA2_CMD_SET_TARGET_KB,
219 CPIA2_CMD_SET_DEF_JPEG_OPT,
220 CPIA2_CMD_REHASH_VP4,
221 CPIA2_CMD_GET_USER_EFFECTS,
222 CPIA2_CMD_SET_USER_EFFECTS
223};
224
225enum user_cmd {
226 COMMAND_NONE = 0x00000001,
227 COMMAND_SET_FPS = 0x00000002,
228 COMMAND_SET_COLOR_PARAMS = 0x00000004,
229 COMMAND_GET_COLOR_PARAMS = 0x00000008,
230 COMMAND_SET_FORMAT = 0x00000010, /* size, etc */
231 COMMAND_SET_FLICKER = 0x00000020
232};
233
234/***
235 * Some defines specific to the 676 chip
236 ***/
237#define CAMACC_CIF 0x01
238#define CAMACC_VGA 0x02
239#define CAMACC_QCIF 0x04
240#define CAMACC_QVGA 0x08
241
242
243struct cpia2_register {
244 u8 index;
245 u8 value;
246};
247
248struct cpia2_reg_mask {
249 u8 index;
250 u8 and_mask;
251 u8 or_mask;
252 u8 fill;
253};
254
255struct cpia2_command {
256 u32 command;
257 u8 req_mode; /* (Block or random) | registerBank */
258 u8 reg_count;
259 u8 direction;
260 u8 start;
261 union reg_types {
262 struct cpia2_register registers[32];
263 struct cpia2_reg_mask masks[16];
264 u8 block_data[64];
265 u8 *patch_data; /* points to function defined block */
266 } buffer;
267};
268
269struct camera_params {
270 struct {
271 u8 firmware_revision_hi; /* For system register set (bank 0) */
272 u8 firmware_revision_lo;
273 u8 asic_id; /* Video Compressor set (bank 1) */
274 u8 asic_rev;
275 u8 vp_device_hi; /* Video Processor set (bank 2) */
276 u8 vp_device_lo;
277 u8 sensor_flags;
278 u8 sensor_rev;
279 } version;
280
281 struct {
282 u32 device_type; /* enumerated from vendor/product ids.
283 * Currently, either STV_672 or STV_676 */
284 u16 vendor;
285 u16 product;
286 u16 device_revision;
287 } pnp_id;
288
289 struct {
290 u8 brightness; /* CPIA2_VP_EXPOSURE_TARGET */
291 u8 contrast; /* Note: this is CPIA2_VP_YRANGE */
292 u8 saturation; /* CPIA2_VP_SATURATION */
293 } color_params;
294
295 struct {
296 u8 cam_register;
297 u8 flicker_mode_req; /* 1 if flicker on, else never flicker */
298 int mains_frequency;
299 } flicker_control;
300
301 struct {
302 u8 jpeg_options;
303 u8 creep_period;
304 u8 user_squeeze;
305 u8 inhibit_htables;
306 } compression;
307
308 struct {
309 u8 ohsize; /* output image size */
310 u8 ovsize;
311 u8 hcrop; /* cropping start_pos/4 */
312 u8 vcrop;
313 u8 hphase; /* scaling registers */
314 u8 vphase;
315 u8 hispan;
316 u8 vispan;
317 u8 hicrop;
318 u8 vicrop;
319 u8 hifraction;
320 u8 vifraction;
321 } image_size;
322
323 struct {
324 int width; /* actual window width */
325 int height; /* actual window height */
326 } roi;
327
328 struct {
329 u8 video_mode;
330 u8 frame_rate;
331 u8 video_size; /* Not a register, just a convenience for cropped sizes */
332 u8 gpio_direction;
333 u8 gpio_data;
334 u8 system_ctrl;
335 u8 system_state;
336 u8 lowlight_boost; /* Bool: 0 = off, 1 = on */
337 u8 device_config;
338 u8 exposure_modes;
339 u8 user_effects;
340 } vp_params;
341
342 struct {
343 u8 pw_control;
344 u8 wakeup;
345 u8 vc_control;
346 u8 vc_mp_direction;
347 u8 vc_mp_data;
348 u8 target_kb;
349 } vc_params;
350
351 struct {
352 u8 power_mode;
353 u8 system_ctrl;
354 u8 stream_mode; /* This is the current alternate for usb drivers */
355 u8 allow_corrupt;
356 } camera_state;
357};
358
359#define NUM_SBUF 2
360
361struct cpia2_sbuf {
362 char *data;
363 struct urb *urb;
364};
365
366struct framebuf {
367 struct timeval timestamp;
368 unsigned long seq;
369 int num;
370 int length;
371 int max_length;
372 volatile enum frame_status status;
373 u8 *data;
374 struct framebuf *next;
375};
376
377struct cpia2_fh {
378 enum v4l2_priority prio;
379 u8 mmapped;
380};
381
382struct camera_data {
383 /* locks */
384 struct semaphore busy_lock; /* guard against SMP multithreading */
385 struct v4l2_prio_state prio;
386
387 /* camera status */
388 volatile int present; /* Is the camera still present? */
389 int open_count; /* # of process that have camera open */
390 int first_image_seen;
391 u8 mains_freq; /* for flicker control */
392 enum sensors sensor_type;
393 u8 flush;
394 u8 mmapped;
395 int streaming; /* 0 = no, 1 = yes */
396 int xfer_mode; /* XFER_BULK or XFER_ISOC */
397 struct camera_params params; /* camera settings */
398
399 /* v4l */
400 int video_size; /* VIDEO_SIZE_ */
401 struct video_device *vdev; /* v4l videodev */
402 struct video_picture vp; /* v4l camera settings */
403 struct video_window vw; /* v4l capture area */
404 __u32 pixelformat; /* Format fourcc */
405
406 /* USB */
407 struct usb_device *dev;
408 unsigned char iface;
409 unsigned int cur_alt;
410 unsigned int old_alt;
411 struct cpia2_sbuf sbuf[NUM_SBUF]; /* Double buffering */
412
413 wait_queue_head_t wq_stream;
414
415 /* Buffering */
416 u32 frame_size;
417 int num_frames;
418 unsigned long frame_count;
419 u8 *frame_buffer; /* frame buffer data */
420 struct framebuf *buffers;
421 struct framebuf * volatile curbuff;
422 struct framebuf *workbuff;
423
424 /* MJPEG Extension */
425 int APPn; /* Number of APP segment to be written, must be 0..15 */
426 int APP_len; /* Length of data in JPEG APPn segment */
427 char APP_data[60]; /* Data in the JPEG APPn segment. */
428
429 int COM_len; /* Length of data in JPEG COM segment */
430 char COM_data[60]; /* Data in JPEG COM segment */
431};
432
433/* v4l */
434int cpia2_register_camera(struct camera_data *cam);
435void cpia2_unregister_camera(struct camera_data *cam);
436
437/* core */
438int cpia2_reset_camera(struct camera_data *cam);
439int cpia2_set_low_power(struct camera_data *cam);
440void cpia2_dbg_dump_registers(struct camera_data *cam);
441int cpia2_match_video_size(int width, int height);
442void cpia2_set_camera_state(struct camera_data *cam);
443void cpia2_save_camera_state(struct camera_data *cam);
444void cpia2_set_color_params(struct camera_data *cam);
445void cpia2_set_brightness(struct camera_data *cam, unsigned char value);
446void cpia2_set_contrast(struct camera_data *cam, unsigned char value);
447void cpia2_set_saturation(struct camera_data *cam, unsigned char value);
448int cpia2_set_flicker_mode(struct camera_data *cam, int mode);
449void cpia2_set_format(struct camera_data *cam);
450int cpia2_send_command(struct camera_data *cam, struct cpia2_command *cmd);
451int cpia2_do_command(struct camera_data *cam,
452 unsigned int command,
453 unsigned char direction, unsigned char param);
454struct camera_data *cpia2_init_camera_struct(void);
455int cpia2_init_camera(struct camera_data *cam);
456int cpia2_allocate_buffers(struct camera_data *cam);
457void cpia2_free_buffers(struct camera_data *cam);
458long cpia2_read(struct camera_data *cam,
459 char *buf, unsigned long count, int noblock);
460unsigned int cpia2_poll(struct camera_data *cam,
461 struct file *filp, poll_table *wait);
462int cpia2_remap_buffer(struct camera_data *cam, struct vm_area_struct *vma);
463void cpia2_set_property_flip(struct camera_data *cam, int prop_val);
464void cpia2_set_property_mirror(struct camera_data *cam, int prop_val);
465int cpia2_set_target_kb(struct camera_data *cam, unsigned char value);
466int cpia2_set_gpio(struct camera_data *cam, unsigned char setting);
467int cpia2_set_fps(struct camera_data *cam, int framerate);
468
469/* usb */
470int cpia2_usb_init(void);
471void cpia2_usb_cleanup(void);
472int cpia2_usb_transfer_cmd(struct camera_data *cam, void *registers,
473 u8 request, u8 start, u8 count, u8 direction);
474int cpia2_usb_stream_start(struct camera_data *cam, unsigned int alternate);
475int cpia2_usb_stream_stop(struct camera_data *cam);
476int cpia2_usb_stream_pause(struct camera_data *cam);
477int cpia2_usb_stream_resume(struct camera_data *cam);
478int cpia2_usb_change_streaming_alternate(struct camera_data *cam,
479 unsigned int alt);
480
481
482/* ----------------------- debug functions ---------------------- */
483#ifdef _CPIA2_DEBUG_
484#define ALOG(lev, fmt, args...) printk(lev "%s:%d %s(): " fmt, __FILE__, __LINE__, __func__, ## args)
485#define LOG(fmt, args...) ALOG(KERN_INFO, fmt, ## args)
486#define ERR(fmt, args...) ALOG(KERN_ERR, fmt, ## args)
487#define DBG(fmt, args...) ALOG(KERN_DEBUG, fmt, ## args)
488#else
489#define ALOG(fmt,args...) printk(fmt,##args)
490#define LOG(fmt,args...) ALOG(KERN_INFO "cpia2: "fmt,##args)
491#define ERR(fmt,args...) ALOG(KERN_ERR "cpia2: "fmt,##args)
 492#define DBG(fmt,args...) do {} while(0)
493#endif
494/* No function or lineno, for shorter lines */
495#define KINFO(fmt, args...) printk(KERN_INFO fmt,##args)
496
497#endif
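/*
 * Hedged sketch (editor's illustration, not part of the patch): one plausible
 * bring-up order for the entry points declared above.  The real driver calls
 * these from its USB probe and V4L paths; the sequencing and error handling
 * here are assumptions, and unwinding on failure is omitted.
 */
static struct camera_data *example_bringup(void)
{
	struct camera_data *cam = cpia2_init_camera_struct();

	if (!cam)
		return NULL;
	if (cpia2_init_camera(cam) < 0)
		return NULL;
	if (cpia2_allocate_buffers(cam) < 0)
		return NULL;
	if (cpia2_register_camera(cam) < 0)
		return NULL;
	return cam;
}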
diff --git a/drivers/media/video/cpia2/cpia2_core.c b/drivers/media/video/cpia2/cpia2_core.c
new file mode 100644
index 000000000000..5dfb242d5b8c
--- /dev/null
+++ b/drivers/media/video/cpia2/cpia2_core.c
@@ -0,0 +1,2525 @@
1/****************************************************************************
2 *
3 * Filename: cpia2_core.c
4 *
 5 * Copyright 2001, STMicroelectronics, Inc.
6 * Contact: steve.miller@st.com
7 *
8 * Description:
 9 * This is a USB driver for CPiA2 based video cameras.
10 * The infrastructure of this driver is based on the cpia usb driver by
11 * Jochen Scharrlach and Johannes Erdfeldt.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 *
27 * Stripped of 2.4 stuff ready for main kernel submit by
28 * Alan Cox <alan@redhat.com>
29 *
30 ****************************************************************************/
31
32#include "cpia2.h"
33
34#include <linux/slab.h>
35#include <linux/vmalloc.h>
36
37//#define _CPIA2_DEBUG_
38
39#include "cpia2patch.h"
40
41#ifdef _CPIA2_DEBUG_
42
43static const char *block_name[] = {
44 "System",
45 "VC",
46 "VP",
47 "IDATA"
48};
49#endif
50
51static unsigned int debugs_on = 0;//DEBUG_REG;
52
53
54/******************************************************************************
55 *
56 * Forward Declarations
57 *
58 *****************************************************************************/
59static int apply_vp_patch(struct camera_data *cam);
60static int set_default_user_mode(struct camera_data *cam);
61static int set_vw_size(struct camera_data *cam, int size);
62static int configure_sensor(struct camera_data *cam,
63 int reqwidth, int reqheight);
64static int config_sensor_410(struct camera_data *cam,
65 int reqwidth, int reqheight);
66static int config_sensor_500(struct camera_data *cam,
67 int reqwidth, int reqheight);
68static int set_all_properties(struct camera_data *cam);
69static void get_color_params(struct camera_data *cam);
70static void wake_system(struct camera_data *cam);
71static void set_lowlight_boost(struct camera_data *cam);
72static void reset_camera_struct(struct camera_data *cam);
73static int cpia2_set_high_power(struct camera_data *cam);
74
75/* Here we want the physical address of the memory.
76 * This is used when initializing the contents of the
77 * area and marking the pages as reserved.
78 */
79static inline unsigned long kvirt_to_pa(unsigned long adr)
80{
81 unsigned long kva, ret;
82
83 kva = (unsigned long) page_address(vmalloc_to_page((void *)adr));
84 kva |= adr & (PAGE_SIZE-1); /* restore the offset */
85 ret = __pa(kva);
86 return ret;
87}
88
89static void *rvmalloc(unsigned long size)
90{
91 void *mem;
92 unsigned long adr;
93
94 /* Round it off to PAGE_SIZE */
95 size = PAGE_ALIGN(size);
96
97 mem = vmalloc_32(size);
98 if (!mem)
99 return NULL;
100
101 memset(mem, 0, size); /* Clear the ram out, no junk to the user */
102 adr = (unsigned long) mem;
103
104 while ((long)size > 0) {
105 SetPageReserved(vmalloc_to_page((void *)adr));
106 adr += PAGE_SIZE;
107 size -= PAGE_SIZE;
108 }
109 return mem;
110}
111
112static void rvfree(void *mem, unsigned long size)
113{
114 unsigned long adr;
115
116 if (!mem)
117 return;
118
119 size = PAGE_ALIGN(size);
120
121 adr = (unsigned long) mem;
122 while ((long)size > 0) {
123 ClearPageReserved(vmalloc_to_page((void *)adr));
124 adr += PAGE_SIZE;
125 size -= PAGE_SIZE;
126 }
127 vfree(mem);
128}
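/*
 * Hedged sketch (editor's illustration, not part of the patch): how
 * rvmalloc()/rvfree() above would typically back a frame buffer.  The
 * example_* names and the caller-supplied size are illustrative assumptions.
 */
static u8 *example_frame_buffer;

static int example_alloc_frames(unsigned long bytes)
{
	example_frame_buffer = rvmalloc(bytes);	/* zeroed, page-reserved vmalloc_32 area */
	return example_frame_buffer ? 0 : -ENOMEM;
}

static void example_free_frames(unsigned long bytes)
{
	rvfree(example_frame_buffer, bytes);	/* ClearPageReserved + vfree */
	example_frame_buffer = NULL;
}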
129
130/******************************************************************************
131 *
132 * cpia2_do_command
133 *
134 * Send an arbitrary command to the camera. For commands that read from
135 * the camera, copy the buffers into the proper param structures.
136 *****************************************************************************/
137int cpia2_do_command(struct camera_data *cam,
138 u32 command, u8 direction, u8 param)
139{
140 int retval = 0;
141 struct cpia2_command cmd;
142 unsigned int device = cam->params.pnp_id.device_type;
143
144 cmd.command = command;
145 cmd.reg_count = 2; /* default */
146 cmd.direction = direction;
147
148 /***
149 * Set up the command.
150 ***/
151 switch (command) {
152 case CPIA2_CMD_GET_VERSION:
153 cmd.req_mode =
154 CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_SYSTEM;
155 cmd.start = CPIA2_SYSTEM_DEVICE_HI;
156 break;
157 case CPIA2_CMD_GET_PNP_ID:
158 cmd.req_mode =
159 CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_SYSTEM;
160 cmd.reg_count = 8;
161 cmd.start = CPIA2_SYSTEM_DESCRIP_VID_HI;
162 break;
163 case CPIA2_CMD_GET_ASIC_TYPE:
164 cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC;
165 cmd.start = CPIA2_VC_ASIC_ID;
166 break;
167 case CPIA2_CMD_GET_SENSOR:
168 cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
169 cmd.start = CPIA2_VP_SENSOR_FLAGS;
170 break;
171 case CPIA2_CMD_GET_VP_DEVICE:
172 cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
173 cmd.start = CPIA2_VP_DEVICEH;
174 break;
175 case CPIA2_CMD_SET_VP_BRIGHTNESS:
176 cmd.buffer.block_data[0] = param; /* Then fall through */
177 case CPIA2_CMD_GET_VP_BRIGHTNESS:
178 cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
179 cmd.reg_count = 1;
180 if (device == DEVICE_STV_672)
181 cmd.start = CPIA2_VP4_EXPOSURE_TARGET;
182 else
183 cmd.start = CPIA2_VP5_EXPOSURE_TARGET;
184 break;
185 case CPIA2_CMD_SET_CONTRAST:
186 cmd.buffer.block_data[0] = param; /* Then fall through */
187 case CPIA2_CMD_GET_CONTRAST:
188 cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
189 cmd.reg_count = 1;
190 cmd.start = CPIA2_VP_YRANGE;
191 break;
192 case CPIA2_CMD_SET_VP_SATURATION:
193 cmd.buffer.block_data[0] = param; /* Then fall through */
194 case CPIA2_CMD_GET_VP_SATURATION:
195 cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
196 cmd.reg_count = 1;
197 if (device == DEVICE_STV_672)
198 cmd.start = CPIA2_VP_SATURATION;
199 else
200 cmd.start = CPIA2_VP5_MCUVSATURATION;
201 break;
202 case CPIA2_CMD_SET_VP_GPIO_DATA:
203 cmd.buffer.block_data[0] = param; /* Then fall through */
204 case CPIA2_CMD_GET_VP_GPIO_DATA:
205 cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
206 cmd.reg_count = 1;
207 cmd.start = CPIA2_VP_GPIO_DATA;
208 break;
209 case CPIA2_CMD_SET_VP_GPIO_DIRECTION:
210 cmd.buffer.block_data[0] = param; /* Then fall through */
211 case CPIA2_CMD_GET_VP_GPIO_DIRECTION:
212 cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
213 cmd.reg_count = 1;
214 cmd.start = CPIA2_VP_GPIO_DIRECTION;
215 break;
216 case CPIA2_CMD_SET_VC_MP_GPIO_DATA:
217 cmd.buffer.block_data[0] = param; /* Then fall through */
218 case CPIA2_CMD_GET_VC_MP_GPIO_DATA:
219 cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC;
220 cmd.reg_count = 1;
221 cmd.start = CPIA2_VC_MP_DATA;
222 break;
223 case CPIA2_CMD_SET_VC_MP_GPIO_DIRECTION:
224 cmd.buffer.block_data[0] = param; /* Then fall through */
225 case CPIA2_CMD_GET_VC_MP_GPIO_DIRECTION:
226 cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC;
227 cmd.reg_count = 1;
228 cmd.start = CPIA2_VC_MP_DIR;
229 break;
230 case CPIA2_CMD_ENABLE_PACKET_CTRL:
231 cmd.req_mode =
232 CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_SYSTEM;
233 cmd.start = CPIA2_SYSTEM_INT_PACKET_CTRL;
234 cmd.reg_count = 1;
235 cmd.buffer.block_data[0] = param;
236 break;
237 case CPIA2_CMD_SET_FLICKER_MODES:
238 cmd.buffer.block_data[0] = param; /* Then fall through */
239 case CPIA2_CMD_GET_FLICKER_MODES:
240 cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
241 cmd.reg_count = 1;
242 cmd.start = CPIA2_VP_FLICKER_MODES;
243 break;
244 case CPIA2_CMD_RESET_FIFO: /* clear fifo and enable stream block */
245 cmd.req_mode = CAMERAACCESS_TYPE_RANDOM | CAMERAACCESS_VC;
246 cmd.reg_count = 2;
247 cmd.start = 0;
248 cmd.buffer.registers[0].index = CPIA2_VC_ST_CTRL;
249 cmd.buffer.registers[0].value = CPIA2_VC_ST_CTRL_SRC_VC |
250 CPIA2_VC_ST_CTRL_DST_USB | CPIA2_VC_ST_CTRL_EOF_DETECT;
251 cmd.buffer.registers[1].index = CPIA2_VC_ST_CTRL;
252 cmd.buffer.registers[1].value = CPIA2_VC_ST_CTRL_SRC_VC |
253 CPIA2_VC_ST_CTRL_DST_USB |
254 CPIA2_VC_ST_CTRL_EOF_DETECT |
255 CPIA2_VC_ST_CTRL_FIFO_ENABLE;
256 break;
257 case CPIA2_CMD_SET_HI_POWER:
258 cmd.req_mode =
259 CAMERAACCESS_TYPE_RANDOM | CAMERAACCESS_SYSTEM;
260 cmd.reg_count = 2;
261 cmd.buffer.registers[0].index =
262 CPIA2_SYSTEM_SYSTEM_CONTROL;
263 cmd.buffer.registers[1].index =
264 CPIA2_SYSTEM_SYSTEM_CONTROL;
265 cmd.buffer.registers[0].value = CPIA2_SYSTEM_CONTROL_CLEAR_ERR;
266 cmd.buffer.registers[1].value =
267 CPIA2_SYSTEM_CONTROL_HIGH_POWER;
268 break;
269 case CPIA2_CMD_SET_LOW_POWER:
270 cmd.req_mode =
271 CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_SYSTEM;
272 cmd.reg_count = 1;
273 cmd.start = CPIA2_SYSTEM_SYSTEM_CONTROL;
274 cmd.buffer.block_data[0] = 0;
275 break;
276 case CPIA2_CMD_CLEAR_V2W_ERR:
277 cmd.req_mode =
278 CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_SYSTEM;
279 cmd.reg_count = 1;
280 cmd.start = CPIA2_SYSTEM_SYSTEM_CONTROL;
281 cmd.buffer.block_data[0] = CPIA2_SYSTEM_CONTROL_CLEAR_ERR;
282 break;
283 case CPIA2_CMD_SET_USER_MODE: /* Then fall through */
284 cmd.buffer.block_data[0] = param;
285 case CPIA2_CMD_GET_USER_MODE:
286 cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
287 cmd.reg_count = 1;
288 if (device == DEVICE_STV_672)
289 cmd.start = CPIA2_VP4_USER_MODE;
290 else
291 cmd.start = CPIA2_VP5_USER_MODE;
292 break;
293 case CPIA2_CMD_FRAMERATE_REQ:
294 cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
295 cmd.reg_count = 1;
296 if (device == DEVICE_STV_672)
297 cmd.start = CPIA2_VP4_FRAMERATE_REQUEST;
298 else
299 cmd.start = CPIA2_VP5_FRAMERATE_REQUEST;
300 cmd.buffer.block_data[0] = param;
301 break;
302 case CPIA2_CMD_SET_WAKEUP:
303 cmd.buffer.block_data[0] = param; /* Then fall through */
304 case CPIA2_CMD_GET_WAKEUP:
305 cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC;
306 cmd.reg_count = 1;
307 cmd.start = CPIA2_VC_WAKEUP;
308 break;
309 case CPIA2_CMD_SET_PW_CONTROL:
310 cmd.buffer.block_data[0] = param; /* Then fall through */
311 case CPIA2_CMD_GET_PW_CONTROL:
312 cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC;
313 cmd.reg_count = 1;
314 cmd.start = CPIA2_VC_PW_CTRL;
315 break;
316 case CPIA2_CMD_GET_VP_SYSTEM_STATE:
317 cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
318 cmd.reg_count = 1;
319 cmd.start = CPIA2_VP_SYSTEMSTATE;
320 break;
321 case CPIA2_CMD_SET_SYSTEM_CTRL:
322 cmd.buffer.block_data[0] = param; /* Then fall through */
323 case CPIA2_CMD_GET_SYSTEM_CTRL:
324 cmd.req_mode =
325 CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_SYSTEM;
326 cmd.reg_count = 1;
327 cmd.start = CPIA2_SYSTEM_SYSTEM_CONTROL;
328 break;
329 case CPIA2_CMD_SET_VP_SYSTEM_CTRL:
330 cmd.buffer.block_data[0] = param; /* Then fall through */
331 case CPIA2_CMD_GET_VP_SYSTEM_CTRL:
332 cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
333 cmd.reg_count = 1;
334 cmd.start = CPIA2_VP_SYSTEMCTRL;
335 break;
336 case CPIA2_CMD_SET_VP_EXP_MODES:
337 cmd.buffer.block_data[0] = param; /* Then fall through */
338 case CPIA2_CMD_GET_VP_EXP_MODES:
339 cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
340 cmd.reg_count = 1;
341 cmd.start = CPIA2_VP_EXPOSURE_MODES;
342 break;
343 case CPIA2_CMD_SET_DEVICE_CONFIG:
344 cmd.buffer.block_data[0] = param; /* Then fall through */
345 case CPIA2_CMD_GET_DEVICE_CONFIG:
346 cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
347 cmd.reg_count = 1;
348 cmd.start = CPIA2_VP_DEVICE_CONFIG;
349 break;
350 case CPIA2_CMD_SET_SERIAL_ADDR:
351 cmd.buffer.block_data[0] = param;
352 cmd.req_mode =
353 CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_SYSTEM;
354 cmd.reg_count = 1;
355 cmd.start = CPIA2_SYSTEM_VP_SERIAL_ADDR;
356 break;
357 case CPIA2_CMD_SET_SENSOR_CR1:
358 cmd.buffer.block_data[0] = param;
359 cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
360 cmd.reg_count = 1;
361 cmd.start = CPIA2_SENSOR_CR1;
362 break;
363 case CPIA2_CMD_SET_VC_CONTROL:
364 cmd.buffer.block_data[0] = param; /* Then fall through */
365 case CPIA2_CMD_GET_VC_CONTROL:
366 cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC;
367 cmd.reg_count = 1;
368 cmd.start = CPIA2_VC_VC_CTRL;
369 break;
370 case CPIA2_CMD_SET_TARGET_KB:
371 cmd.req_mode = CAMERAACCESS_TYPE_RANDOM | CAMERAACCESS_VC;
372 cmd.reg_count = 1;
373 cmd.buffer.registers[0].index = CPIA2_VC_VC_TARGET_KB;
374 cmd.buffer.registers[0].value = param;
375 break;
376 case CPIA2_CMD_SET_DEF_JPEG_OPT:
377 cmd.req_mode = CAMERAACCESS_TYPE_RANDOM | CAMERAACCESS_VC;
378 cmd.reg_count = 4;
379 cmd.buffer.registers[0].index = CPIA2_VC_VC_JPEG_OPT;
380 cmd.buffer.registers[0].value =
381 CPIA2_VC_VC_JPEG_OPT_DOUBLE_SQUEEZE;
382 cmd.buffer.registers[1].index = CPIA2_VC_VC_USER_SQUEEZE;
383 cmd.buffer.registers[1].value = 20;
384 cmd.buffer.registers[2].index = CPIA2_VC_VC_CREEP_PERIOD;
385 cmd.buffer.registers[2].value = 2;
386 cmd.buffer.registers[3].index = CPIA2_VC_VC_JPEG_OPT;
387 cmd.buffer.registers[3].value = CPIA2_VC_VC_JPEG_OPT_DEFAULT;
388 break;
389 case CPIA2_CMD_REHASH_VP4:
390 cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
391 cmd.reg_count = 1;
392 cmd.start = CPIA2_VP_REHASH_VALUES;
393 cmd.buffer.block_data[0] = param;
394 break;
395 case CPIA2_CMD_SET_USER_EFFECTS: /* Note: Be careful with this as
396 this register can also affect
397 flicker modes */
398 cmd.buffer.block_data[0] = param; /* Then fall through */
399 case CPIA2_CMD_GET_USER_EFFECTS:
400 cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
401 cmd.reg_count = 1;
402 if (device == DEVICE_STV_672)
403 cmd.start = CPIA2_VP4_USER_EFFECTS;
404 else
405 cmd.start = CPIA2_VP5_USER_EFFECTS;
406 break;
407 default:
408 LOG("DoCommand received invalid command\n");
409 return -EINVAL;
410 }
411
412 retval = cpia2_send_command(cam, &cmd);
413 if (retval) {
414 return retval;
415 }
416
417 /***
418 * Now copy any results from a read into the appropriate param struct.
419 ***/
420 switch (command) {
421 case CPIA2_CMD_GET_VERSION:
422 cam->params.version.firmware_revision_hi =
423 cmd.buffer.block_data[0];
424 cam->params.version.firmware_revision_lo =
425 cmd.buffer.block_data[1];
426 break;
427 case CPIA2_CMD_GET_PNP_ID:
428 cam->params.pnp_id.vendor = (cmd.buffer.block_data[0] << 8) |
429 cmd.buffer.block_data[1];
430 cam->params.pnp_id.product = (cmd.buffer.block_data[2] << 8) |
431 cmd.buffer.block_data[3];
432 cam->params.pnp_id.device_revision =
433 (cmd.buffer.block_data[4] << 8) |
434 cmd.buffer.block_data[5];
435 if (cam->params.pnp_id.vendor == 0x553) {
436 if (cam->params.pnp_id.product == 0x100) {
437 cam->params.pnp_id.device_type = DEVICE_STV_672;
438 } else if (cam->params.pnp_id.product == 0x140 ||
439 cam->params.pnp_id.product == 0x151) {
440 cam->params.pnp_id.device_type = DEVICE_STV_676;
441 }
442 }
443 break;
444 case CPIA2_CMD_GET_ASIC_TYPE:
445 cam->params.version.asic_id = cmd.buffer.block_data[0];
446 cam->params.version.asic_rev = cmd.buffer.block_data[1];
447 break;
448 case CPIA2_CMD_GET_SENSOR:
449 cam->params.version.sensor_flags = cmd.buffer.block_data[0];
450 cam->params.version.sensor_rev = cmd.buffer.block_data[1];
451 break;
452 case CPIA2_CMD_GET_VP_DEVICE:
453 cam->params.version.vp_device_hi = cmd.buffer.block_data[0];
454 cam->params.version.vp_device_lo = cmd.buffer.block_data[1];
455 break;
456 case CPIA2_CMD_GET_VP_BRIGHTNESS:
457 cam->params.color_params.brightness = cmd.buffer.block_data[0];
458 break;
459 case CPIA2_CMD_GET_CONTRAST:
460 cam->params.color_params.contrast = cmd.buffer.block_data[0];
461 break;
462 case CPIA2_CMD_GET_VP_SATURATION:
463 cam->params.color_params.saturation = cmd.buffer.block_data[0];
464 break;
465 case CPIA2_CMD_GET_VP_GPIO_DATA:
466 cam->params.vp_params.gpio_data = cmd.buffer.block_data[0];
467 break;
468 case CPIA2_CMD_GET_VP_GPIO_DIRECTION:
469 cam->params.vp_params.gpio_direction = cmd.buffer.block_data[0];
470 break;
471 case CPIA2_CMD_GET_VC_MP_GPIO_DIRECTION:
472 cam->params.vc_params.vc_mp_direction =cmd.buffer.block_data[0];
473 break;
474 case CPIA2_CMD_GET_VC_MP_GPIO_DATA:
475 cam->params.vc_params.vc_mp_data = cmd.buffer.block_data[0];
476 break;
477 case CPIA2_CMD_GET_FLICKER_MODES:
478 cam->params.flicker_control.cam_register =
479 cmd.buffer.block_data[0];
480 break;
481 case CPIA2_CMD_GET_WAKEUP:
482 cam->params.vc_params.wakeup = cmd.buffer.block_data[0];
483 break;
484 case CPIA2_CMD_GET_PW_CONTROL:
485 cam->params.vc_params.pw_control = cmd.buffer.block_data[0];
486 break;
487 case CPIA2_CMD_GET_SYSTEM_CTRL:
488 cam->params.camera_state.system_ctrl = cmd.buffer.block_data[0];
489 break;
490 case CPIA2_CMD_GET_VP_SYSTEM_STATE:
491 cam->params.vp_params.system_state = cmd.buffer.block_data[0];
492 break;
493 case CPIA2_CMD_GET_VP_SYSTEM_CTRL:
494 cam->params.vp_params.system_ctrl = cmd.buffer.block_data[0];
495 break;
496 case CPIA2_CMD_GET_VP_EXP_MODES:
497 cam->params.vp_params.exposure_modes = cmd.buffer.block_data[0];
498 break;
499 case CPIA2_CMD_GET_DEVICE_CONFIG:
500 cam->params.vp_params.device_config = cmd.buffer.block_data[0];
501 break;
502 case CPIA2_CMD_GET_VC_CONTROL:
503 cam->params.vc_params.vc_control = cmd.buffer.block_data[0];
504 break;
505 case CPIA2_CMD_GET_USER_MODE:
506 cam->params.vp_params.video_mode = cmd.buffer.block_data[0];
507 break;
508 case CPIA2_CMD_GET_USER_EFFECTS:
509 cam->params.vp_params.user_effects = cmd.buffer.block_data[0];
510 break;
511 default:
512 break;
513 }
514 return retval;
515}
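/*
 * Hedged sketch (editor's illustration, not part of the patch): the GET/SET
 * idiom cpia2_do_command() supports.  A read fills the matching field in
 * cam->params; a write passes the new value in 'param'.  The helper name is
 * illustrative only.
 */
static void example_adjust_brightness(struct camera_data *cam, u8 new_val)
{
	/* read: result lands in cam->params.color_params.brightness */
	cpia2_do_command(cam, CPIA2_CMD_GET_VP_BRIGHTNESS, TRANSFER_READ, 0);

	/* write: the GET/SET pair addresses the same exposure-target register */
	cpia2_do_command(cam, CPIA2_CMD_SET_VP_BRIGHTNESS, TRANSFER_WRITE, new_val);
}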
516
517/******************************************************************************
518 *
519 * cpia2_send_command
520 *
521 *****************************************************************************/
522int cpia2_send_command(struct camera_data *cam, struct cpia2_command *cmd)
523{
524 u8 count;
525 u8 start;
526 u8 block_index;
527 u8 *buffer;
528 int retval;
529 const char* dir;
530
531 if (cmd->direction == TRANSFER_WRITE) {
532 dir = "Write";
533 } else {
534 dir = "Read";
535 }
536
537 block_index = cmd->req_mode & 0x03;
538
539 switch (cmd->req_mode & 0x0c) {
540 case CAMERAACCESS_TYPE_RANDOM:
541 count = cmd->reg_count * sizeof(struct cpia2_register);
542 start = 0;
543 buffer = (u8 *) & cmd->buffer;
544 if (debugs_on & DEBUG_REG)
545 DBG("%s Random: Register block %s\n", dir,
546 block_name[block_index]);
547 break;
548 case CAMERAACCESS_TYPE_BLOCK:
549 count = cmd->reg_count;
550 start = cmd->start;
551 buffer = cmd->buffer.block_data;
552 if (debugs_on & DEBUG_REG)
553 DBG("%s Block: Register block %s\n", dir,
554 block_name[block_index]);
555 break;
556 case CAMERAACCESS_TYPE_MASK:
557 count = cmd->reg_count * sizeof(struct cpia2_reg_mask);
558 start = 0;
559 buffer = (u8 *) & cmd->buffer;
560 if (debugs_on & DEBUG_REG)
561 DBG("%s Mask: Register block %s\n", dir,
562 block_name[block_index]);
563 break;
564 case CAMERAACCESS_TYPE_REPEAT: /* For patch blocks only */
565 count = cmd->reg_count;
566 start = cmd->start;
567 buffer = cmd->buffer.block_data;
568 if (debugs_on & DEBUG_REG)
569 DBG("%s Repeat: Register block %s\n", dir,
570 block_name[block_index]);
571 break;
572 default:
573 LOG("%s: invalid request mode\n",__FUNCTION__);
574 return -EINVAL;
575 }
576
577 retval = cpia2_usb_transfer_cmd(cam,
578 buffer,
579 cmd->req_mode,
580 start, count, cmd->direction);
581#ifdef _CPIA2_DEBUG_
582 if (debugs_on & DEBUG_REG) {
583 int i;
584 for (i = 0; i < cmd->reg_count; i++) {
585 if((cmd->req_mode & 0x0c) == CAMERAACCESS_TYPE_BLOCK)
586 KINFO("%s Block: [0x%02X] = 0x%02X\n",
587 dir, start + i, buffer[i]);
588 if((cmd->req_mode & 0x0c) == CAMERAACCESS_TYPE_RANDOM)
589 KINFO("%s Random: [0x%02X] = 0x%02X\n",
590 dir, cmd->buffer.registers[i].index,
591 cmd->buffer.registers[i].value);
592 }
593 }
594#endif
595
596 return retval;
597};
598
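/*
 * Hedged sketch (editor's illustration, not part of the patch): building a
 * random-mode write by hand, mirroring the CPIA2_CMD_RESET_FIFO case above.
 * In random mode cpia2_send_command() transfers reg_count * sizeof(struct
 * cpia2_register) bytes of index/value pairs and ignores cmd.start.
 */
static int example_reset_fifo(struct camera_data *cam)
{
	struct cpia2_command cmd;

	cmd.req_mode = CAMERAACCESS_TYPE_RANDOM | CAMERAACCESS_VC;
	cmd.direction = TRANSFER_WRITE;
	cmd.reg_count = 2;
	cmd.buffer.registers[0].index = CPIA2_VC_ST_CTRL;
	cmd.buffer.registers[0].value = CPIA2_VC_ST_CTRL_SRC_VC |
		CPIA2_VC_ST_CTRL_DST_USB | CPIA2_VC_ST_CTRL_EOF_DETECT;
	cmd.buffer.registers[1].index = CPIA2_VC_ST_CTRL;
	cmd.buffer.registers[1].value = cmd.buffer.registers[0].value |
		CPIA2_VC_ST_CTRL_FIFO_ENABLE;

	return cpia2_send_command(cam, &cmd);
}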
599/*************
600 * Functions to implement camera functionality
601 *************/
602/******************************************************************************
603 *
604 * cpia2_get_version_info
605 *
606 *****************************************************************************/
607static void cpia2_get_version_info(struct camera_data *cam)
608{
609 cpia2_do_command(cam, CPIA2_CMD_GET_VERSION, TRANSFER_READ, 0);
610 cpia2_do_command(cam, CPIA2_CMD_GET_PNP_ID, TRANSFER_READ, 0);
611 cpia2_do_command(cam, CPIA2_CMD_GET_ASIC_TYPE, TRANSFER_READ, 0);
612 cpia2_do_command(cam, CPIA2_CMD_GET_SENSOR, TRANSFER_READ, 0);
613 cpia2_do_command(cam, CPIA2_CMD_GET_VP_DEVICE, TRANSFER_READ, 0);
614}
615
616/******************************************************************************
617 *
618 * cpia2_reset_camera
619 *
620 * Called at least during the open process, sets up initial params.
621 *****************************************************************************/
622int cpia2_reset_camera(struct camera_data *cam)
623{
624 u8 tmp_reg;
625 int retval = 0;
626 int i;
627 struct cpia2_command cmd;
628
629 /***
630 * VC setup
631 ***/
632 retval = configure_sensor(cam,
633 cam->params.roi.width,
634 cam->params.roi.height);
635 if (retval < 0) {
636 ERR("Couldn't configure sensor, error=%d\n", retval);
637 return retval;
638 }
639
640 /* Clear FIFO and route/enable stream block */
641 cmd.req_mode = CAMERAACCESS_TYPE_RANDOM | CAMERAACCESS_VC;
642 cmd.direction = TRANSFER_WRITE;
643 cmd.reg_count = 2;
644 cmd.buffer.registers[0].index = CPIA2_VC_ST_CTRL;
645 cmd.buffer.registers[0].value = CPIA2_VC_ST_CTRL_SRC_VC |
646 CPIA2_VC_ST_CTRL_DST_USB | CPIA2_VC_ST_CTRL_EOF_DETECT;
647 cmd.buffer.registers[1].index = CPIA2_VC_ST_CTRL;
648 cmd.buffer.registers[1].value = CPIA2_VC_ST_CTRL_SRC_VC |
649 CPIA2_VC_ST_CTRL_DST_USB |
650 CPIA2_VC_ST_CTRL_EOF_DETECT | CPIA2_VC_ST_CTRL_FIFO_ENABLE;
651
652 cpia2_send_command(cam, &cmd);
653
654 cpia2_set_high_power(cam);
655
656 if (cam->params.pnp_id.device_type == DEVICE_STV_672) {
657 /* Enable button notification */
658 cmd.req_mode = CAMERAACCESS_TYPE_RANDOM | CAMERAACCESS_SYSTEM;
659 cmd.buffer.registers[0].index = CPIA2_SYSTEM_INT_PACKET_CTRL;
660 cmd.buffer.registers[0].value =
661 CPIA2_SYSTEM_INT_PACKET_CTRL_ENABLE_SW_XX;
662 cmd.reg_count = 1;
663 cpia2_send_command(cam, &cmd);
664 }
665
666 current->state = TASK_INTERRUPTIBLE;
667 schedule_timeout(100 * HZ / 1000); /* wait for 100 msecs */
668
669 if (cam->params.pnp_id.device_type == DEVICE_STV_672)
670 retval = apply_vp_patch(cam);
671
672 /* wait for vp to go to sleep */
673 current->state = TASK_INTERRUPTIBLE;
674 schedule_timeout(100 * HZ / 1000); /* wait for 100 msecs */
675
676 /***
677 * If this is a 676, apply VP5 fixes before we start streaming
678 ***/
679 if (cam->params.pnp_id.device_type == DEVICE_STV_676) {
680 cmd.req_mode = CAMERAACCESS_TYPE_RANDOM | CAMERAACCESS_VP;
681
682 /* The following writes improve the picture */
683 cmd.buffer.registers[0].index = CPIA2_VP5_MYBLACK_LEVEL;
684 cmd.buffer.registers[0].value = 0; /* reduce from the default
685 * rec 601 pedestal of 16 */
686 cmd.buffer.registers[1].index = CPIA2_VP5_MCYRANGE;
687 cmd.buffer.registers[1].value = 0x92; /* increase from 100% to
688 * (256/256 - 31) to fill
689 * available range */
690 cmd.buffer.registers[2].index = CPIA2_VP5_MYCEILING;
691 cmd.buffer.registers[2].value = 0xFF; /* Increase from the
692 * default rec 601 ceiling
693 * of 240 */
694 cmd.buffer.registers[3].index = CPIA2_VP5_MCUVSATURATION;
695 cmd.buffer.registers[3].value = 0xFF; /* Increase from the rec
696 * 601 100% level (128)
697 * to 145-192 */
698 cmd.buffer.registers[4].index = CPIA2_VP5_ANTIFLKRSETUP;
699 cmd.buffer.registers[4].value = 0x80; /* Inhibit the
700 * anti-flicker */
701
702 /* The following 4 writes are a fix to allow QVGA to work at 30 fps */
703 cmd.buffer.registers[5].index = CPIA2_VP_RAM_ADDR_H;
704 cmd.buffer.registers[5].value = 0x01;
705 cmd.buffer.registers[6].index = CPIA2_VP_RAM_ADDR_L;
706 cmd.buffer.registers[6].value = 0xE3;
707 cmd.buffer.registers[7].index = CPIA2_VP_RAM_DATA;
708 cmd.buffer.registers[7].value = 0x02;
709 cmd.buffer.registers[8].index = CPIA2_VP_RAM_DATA;
710 cmd.buffer.registers[8].value = 0xFC;
711
712 cmd.direction = TRANSFER_WRITE;
713 cmd.reg_count = 9;
714
715 cpia2_send_command(cam, &cmd);
716 }
717
718 /* Activate all settings and start the data stream */
719 /* Set user mode */
720 set_default_user_mode(cam);
721
722 /* Give VP time to wake up */
723 current->state = TASK_INTERRUPTIBLE;
724 schedule_timeout(100 * HZ / 1000); /* wait for 100 msecs */
725
726 set_all_properties(cam);
727
728 cpia2_do_command(cam, CPIA2_CMD_GET_USER_MODE, TRANSFER_READ, 0);
729 DBG("After SetAllProperties(cam), user mode is 0x%0X\n",
730 cam->params.vp_params.video_mode);
731
732 /***
 733 * Set audio regulator off. This and the code to set the compression
734 * state are too complex to form a CPIA2_CMD_, and seem to be somewhat
735 * intertwined. This stuff came straight from the windows driver.
736 ***/
737 /* Turn AutoExposure off in VP and enable the serial bridge to the sensor */
738 cpia2_do_command(cam, CPIA2_CMD_GET_VP_SYSTEM_CTRL, TRANSFER_READ, 0);
739 tmp_reg = cam->params.vp_params.system_ctrl;
740 cmd.buffer.registers[0].value = tmp_reg &
741 (tmp_reg & (CPIA2_VP_SYSTEMCTRL_HK_CONTROL ^ 0xFF));
742
743 cpia2_do_command(cam, CPIA2_CMD_GET_DEVICE_CONFIG, TRANSFER_READ, 0);
744 cmd.buffer.registers[1].value = cam->params.vp_params.device_config |
745 CPIA2_VP_DEVICE_CONFIG_SERIAL_BRIDGE;
746 cmd.buffer.registers[0].index = CPIA2_VP_SYSTEMCTRL;
747 cmd.buffer.registers[1].index = CPIA2_VP_DEVICE_CONFIG;
748 cmd.req_mode = CAMERAACCESS_TYPE_RANDOM | CAMERAACCESS_VP;
749 cmd.reg_count = 2;
750 cmd.direction = TRANSFER_WRITE;
751 cmd.start = 0;
752 cpia2_send_command(cam, &cmd);
753
754 /* Set the correct I2C address in the CPiA-2 system register */
755 cpia2_do_command(cam,
756 CPIA2_CMD_SET_SERIAL_ADDR,
757 TRANSFER_WRITE,
758 CPIA2_SYSTEM_VP_SERIAL_ADDR_SENSOR);
759
760 /* Now have sensor access - set bit to turn the audio regulator off */
761 cpia2_do_command(cam,
762 CPIA2_CMD_SET_SENSOR_CR1,
763 TRANSFER_WRITE, CPIA2_SENSOR_CR1_DOWN_AUDIO_REGULATOR);
764
765 /* Set the correct I2C address in the CPiA-2 system register */
766 if (cam->params.pnp_id.device_type == DEVICE_STV_672)
767 cpia2_do_command(cam,
768 CPIA2_CMD_SET_SERIAL_ADDR,
769 TRANSFER_WRITE,
770 CPIA2_SYSTEM_VP_SERIAL_ADDR_VP); // 0x88
771 else
772 cpia2_do_command(cam,
773 CPIA2_CMD_SET_SERIAL_ADDR,
774 TRANSFER_WRITE,
775 CPIA2_SYSTEM_VP_SERIAL_ADDR_676_VP); // 0x8a
776
777 /* increase signal drive strength */
778 if (cam->params.pnp_id.device_type == DEVICE_STV_676)
779 cpia2_do_command(cam,
780 CPIA2_CMD_SET_VP_EXP_MODES,
781 TRANSFER_WRITE,
782 CPIA2_VP_EXPOSURE_MODES_COMPILE_EXP);
783
784 /* Start autoexposure */
785 cpia2_do_command(cam, CPIA2_CMD_GET_DEVICE_CONFIG, TRANSFER_READ, 0);
786 cmd.buffer.registers[0].value = cam->params.vp_params.device_config &
787 (CPIA2_VP_DEVICE_CONFIG_SERIAL_BRIDGE ^ 0xFF);
788
789 cpia2_do_command(cam, CPIA2_CMD_GET_VP_SYSTEM_CTRL, TRANSFER_READ, 0);
790 cmd.buffer.registers[1].value =
791 cam->params.vp_params.system_ctrl | CPIA2_VP_SYSTEMCTRL_HK_CONTROL;
792
793 cmd.buffer.registers[0].index = CPIA2_VP_DEVICE_CONFIG;
794 cmd.buffer.registers[1].index = CPIA2_VP_SYSTEMCTRL;
795 cmd.req_mode = CAMERAACCESS_TYPE_RANDOM | CAMERAACCESS_VP;
796 cmd.reg_count = 2;
797 cmd.direction = TRANSFER_WRITE;
798
799 cpia2_send_command(cam, &cmd);
800
801 /* Set compression state */
802 cpia2_do_command(cam, CPIA2_CMD_GET_VC_CONTROL, TRANSFER_READ, 0);
803 if (cam->params.compression.inhibit_htables) {
804 tmp_reg = cam->params.vc_params.vc_control |
805 CPIA2_VC_VC_CTRL_INHIBIT_H_TABLES;
806 } else {
807 tmp_reg = cam->params.vc_params.vc_control &
808 ~CPIA2_VC_VC_CTRL_INHIBIT_H_TABLES;
809 }
810 cpia2_do_command(cam, CPIA2_CMD_SET_VC_CONTROL, TRANSFER_WRITE,tmp_reg);
811
812 /* Set target size (kb) on vc */
813 cpia2_do_command(cam, CPIA2_CMD_SET_TARGET_KB,
814 TRANSFER_WRITE, cam->params.vc_params.target_kb);
815
816 /* Wiggle VC Reset */
817 /***
818 * First read and wait a bit.
819 ***/
820 for (i = 0; i < 50; i++) {
821 cpia2_do_command(cam, CPIA2_CMD_GET_PW_CONTROL,
822 TRANSFER_READ, 0);
823 }
824
825 tmp_reg = cam->params.vc_params.pw_control;
826 tmp_reg &= ~CPIA2_VC_PW_CTRL_VC_RESET_N;
827
828 cpia2_do_command(cam, CPIA2_CMD_SET_PW_CONTROL, TRANSFER_WRITE,tmp_reg);
829
830 tmp_reg |= CPIA2_VC_PW_CTRL_VC_RESET_N;
831 cpia2_do_command(cam, CPIA2_CMD_SET_PW_CONTROL, TRANSFER_WRITE,tmp_reg);
832
833 cpia2_do_command(cam, CPIA2_CMD_SET_DEF_JPEG_OPT, TRANSFER_WRITE, 0);
834
835 cpia2_do_command(cam, CPIA2_CMD_GET_USER_MODE, TRANSFER_READ, 0);
836 DBG("After VC RESET, user mode is 0x%0X\n",
837 cam->params.vp_params.video_mode);
838
839 return retval;
840}
841
842/******************************************************************************
843 *
844 * cpia2_set_high_power
845 *
846 *****************************************************************************/
847static int cpia2_set_high_power(struct camera_data *cam)
848{
849 int i;
850 for (i = 0; i <= 50; i++) {
851 /* Read system status */
852 cpia2_do_command(cam,CPIA2_CMD_GET_SYSTEM_CTRL,TRANSFER_READ,0);
853
854 /* If there is an error, clear it */
855 if(cam->params.camera_state.system_ctrl &
856 CPIA2_SYSTEM_CONTROL_V2W_ERR)
857 cpia2_do_command(cam, CPIA2_CMD_CLEAR_V2W_ERR,
858 TRANSFER_WRITE, 0);
859
860 /* Try to set high power mode */
861 cpia2_do_command(cam, CPIA2_CMD_SET_SYSTEM_CTRL,
862 TRANSFER_WRITE, 1);
863
864 /* Try to read something in VP to check if everything is awake */
865 cpia2_do_command(cam, CPIA2_CMD_GET_VP_SYSTEM_STATE,
866 TRANSFER_READ, 0);
867 if (cam->params.vp_params.system_state &
868 CPIA2_VP_SYSTEMSTATE_HK_ALIVE) {
869 break;
870 } else if (i == 50) {
871 cam->params.camera_state.power_mode = LO_POWER_MODE;
872 ERR("Camera did not wake up\n");
873 return -EIO;
874 }
875 }
876
877 DBG("System now in high power state\n");
878 cam->params.camera_state.power_mode = HI_POWER_MODE;
879 return 0;
880}
881
882/******************************************************************************
883 *
884 * cpia2_set_low_power
885 *
886 *****************************************************************************/
887int cpia2_set_low_power(struct camera_data *cam)
888{
889 cam->params.camera_state.power_mode = LO_POWER_MODE;
890 cpia2_do_command(cam, CPIA2_CMD_SET_SYSTEM_CTRL, TRANSFER_WRITE, 0);
891 return 0;
892}
893
894/******************************************************************************
895 *
896 * apply_vp_patch
897 *
898 *****************************************************************************/
899static int apply_vp_patch(struct camera_data *cam)
900{
901 int i, j;
902 struct cpia2_command cmd;
903
904 cmd.req_mode = CAMERAACCESS_TYPE_REPEAT | CAMERAACCESS_VP;
905 cmd.direction = TRANSFER_WRITE;
906
907 for (i = 0; i < PATCH_DATA_SIZE; i++) {
908 for (j = 0; j < patch_data[i].count; j++) {
909 cmd.buffer.block_data[j] = patch_data[i].data[j];
910 }
911
912 cmd.start = patch_data[i].reg;
913 cmd.reg_count = patch_data[i].count;
914 cpia2_send_command(cam, &cmd);
915 }
916
917 return 0;
918}
919
920/******************************************************************************
921 *
922 * set_default_user_mode
923 *
924 *****************************************************************************/
925static int set_default_user_mode(struct camera_data *cam)
926{
927 unsigned char user_mode;
928 unsigned char frame_rate;
929 int width = cam->params.roi.width;
930 int height = cam->params.roi.height;
931
932 switch (cam->params.version.sensor_flags) {
933 case CPIA2_VP_SENSOR_FLAGS_404:
934 case CPIA2_VP_SENSOR_FLAGS_407:
935 case CPIA2_VP_SENSOR_FLAGS_409:
936 case CPIA2_VP_SENSOR_FLAGS_410:
937 if ((width > STV_IMAGE_QCIF_COLS)
938 || (height > STV_IMAGE_QCIF_ROWS)) {
939 user_mode = CPIA2_VP_USER_MODE_CIF;
940 } else {
941 user_mode = CPIA2_VP_USER_MODE_QCIFDS;
942 }
943 frame_rate = CPIA2_VP_FRAMERATE_30;
944 break;
945 case CPIA2_VP_SENSOR_FLAGS_500:
946 if ((width > STV_IMAGE_CIF_COLS)
947 || (height > STV_IMAGE_CIF_ROWS)) {
948 user_mode = CPIA2_VP_USER_MODE_VGA;
949 } else {
950 user_mode = CPIA2_VP_USER_MODE_QVGADS;
951 }
952 if (cam->params.pnp_id.device_type == DEVICE_STV_672)
953 frame_rate = CPIA2_VP_FRAMERATE_15;
954 else
955 frame_rate = CPIA2_VP_FRAMERATE_30;
956 break;
957 default:
958 LOG("%s: Invalid sensor flag value 0x%0X\n",__FUNCTION__,
959 cam->params.version.sensor_flags);
960 return -EINVAL;
961 }
962
963 DBG("Sensor flag = 0x%0x, user mode = 0x%0x, frame rate = 0x%X\n",
964 cam->params.version.sensor_flags, user_mode, frame_rate);
965 cpia2_do_command(cam, CPIA2_CMD_SET_USER_MODE, TRANSFER_WRITE,
966 user_mode);
967 if(cam->params.vp_params.frame_rate > 0 &&
968 frame_rate > cam->params.vp_params.frame_rate)
969 frame_rate = cam->params.vp_params.frame_rate;
970
971 cpia2_set_fps(cam, frame_rate);
972
973// if (cam->params.pnp_id.device_type == DEVICE_STV_676)
974// cpia2_do_command(cam,
975// CPIA2_CMD_SET_VP_SYSTEM_CTRL,
976// TRANSFER_WRITE,
977// CPIA2_VP_SYSTEMCTRL_HK_CONTROL |
978// CPIA2_VP_SYSTEMCTRL_POWER_CONTROL);
979
980 return 0;
981}
982
983/******************************************************************************
984 *
985 * cpia2_match_video_size
986 *
987 * return the best match, where 'best' is as always
988 * the largest that is not bigger than what is requested.
989 *****************************************************************************/
990int cpia2_match_video_size(int width, int height)
991{
992 if (width >= STV_IMAGE_VGA_COLS && height >= STV_IMAGE_VGA_ROWS)
993 return VIDEOSIZE_VGA;
994
995 if (width >= STV_IMAGE_CIF_COLS && height >= STV_IMAGE_CIF_ROWS)
996 return VIDEOSIZE_CIF;
997
998 if (width >= STV_IMAGE_QVGA_COLS && height >= STV_IMAGE_QVGA_ROWS)
999 return VIDEOSIZE_QVGA;
1000
1001 if (width >= 288 && height >= 216)
1002 return VIDEOSIZE_288_216;
1003
1004 if (width >= 256 && height >= 192)
1005 return VIDEOSIZE_256_192;
1006
1007 if (width >= 224 && height >= 168)
1008 return VIDEOSIZE_224_168;
1009
1010 if (width >= 192 && height >= 144)
1011 return VIDEOSIZE_192_144;
1012
1013 if (width >= STV_IMAGE_QCIF_COLS && height >= STV_IMAGE_QCIF_ROWS)
1014 return VIDEOSIZE_QCIF;
1015
1016 return -1;
1017}
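/*
 * Hedged example (editor's illustration, not part of the patch): the largest
 * supported size that still fits inside the request wins, e.g. a 400x300
 * request passes the CIF test (>= 352x288) but not the VGA test (>= 640x480).
 */
static int example_match(void)
{
	return cpia2_match_video_size(400, 300);	/* VIDEOSIZE_CIF */
}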
1018
1019/******************************************************************************
1020 *
1021 * SetVideoSize
1022 *
1023 *****************************************************************************/
1024static int set_vw_size(struct camera_data *cam, int size)
1025{
1026 int retval = 0;
1027
1028 cam->params.vp_params.video_size = size;
1029
1030 switch (size) {
1031 case VIDEOSIZE_VGA:
1032 DBG("Setting size to VGA\n");
1033 cam->params.roi.width = STV_IMAGE_VGA_COLS;
1034 cam->params.roi.height = STV_IMAGE_VGA_ROWS;
1035 cam->vw.width = STV_IMAGE_VGA_COLS;
1036 cam->vw.height = STV_IMAGE_VGA_ROWS;
1037 break;
1038 case VIDEOSIZE_CIF:
1039 DBG("Setting size to CIF\n");
1040 cam->params.roi.width = STV_IMAGE_CIF_COLS;
1041 cam->params.roi.height = STV_IMAGE_CIF_ROWS;
1042 cam->vw.width = STV_IMAGE_CIF_COLS;
1043 cam->vw.height = STV_IMAGE_CIF_ROWS;
1044 break;
1045 case VIDEOSIZE_QVGA:
1046 DBG("Setting size to QVGA\n");
1047 cam->params.roi.width = STV_IMAGE_QVGA_COLS;
1048 cam->params.roi.height = STV_IMAGE_QVGA_ROWS;
1049 cam->vw.width = STV_IMAGE_QVGA_COLS;
1050 cam->vw.height = STV_IMAGE_QVGA_ROWS;
1051 break;
1052 case VIDEOSIZE_288_216:
1053 cam->params.roi.width = 288;
1054 cam->params.roi.height = 216;
1055 cam->vw.width = 288;
1056 cam->vw.height = 216;
1057 break;
1058 case VIDEOSIZE_256_192:
1059 cam->vw.width = 256;
1060 cam->vw.height = 192;
1061 cam->params.roi.width = 256;
1062 cam->params.roi.height = 192;
1063 break;
1064 case VIDEOSIZE_224_168:
1065 cam->vw.width = 224;
1066 cam->vw.height = 168;
1067 cam->params.roi.width = 224;
1068 cam->params.roi.height = 168;
1069 break;
1070 case VIDEOSIZE_192_144:
1071 cam->vw.width = 192;
1072 cam->vw.height = 144;
1073 cam->params.roi.width = 192;
1074 cam->params.roi.height = 144;
1075 break;
1076 case VIDEOSIZE_QCIF:
1077 DBG("Setting size to QCIF\n");
1078 cam->params.roi.width = STV_IMAGE_QCIF_COLS;
1079 cam->params.roi.height = STV_IMAGE_QCIF_ROWS;
1080 cam->vw.width = STV_IMAGE_QCIF_COLS;
1081 cam->vw.height = STV_IMAGE_QCIF_ROWS;
1082 break;
1083 default:
1084 retval = -EINVAL;
1085 }
1086 return retval;
1087}
1088
1089/******************************************************************************
1090 *
1091 * configure_sensor
1092 *
1093 *****************************************************************************/
1094static int configure_sensor(struct camera_data *cam,
1095 int req_width, int req_height)
1096{
1097 int retval;
1098
1099 switch (cam->params.version.sensor_flags) {
1100 case CPIA2_VP_SENSOR_FLAGS_404:
1101 case CPIA2_VP_SENSOR_FLAGS_407:
1102 case CPIA2_VP_SENSOR_FLAGS_409:
1103 case CPIA2_VP_SENSOR_FLAGS_410:
1104 retval = config_sensor_410(cam, req_width, req_height);
1105 break;
1106 case CPIA2_VP_SENSOR_FLAGS_500:
1107 retval = config_sensor_500(cam, req_width, req_height);
1108 break;
1109 default:
1110 return -EINVAL;
1111 }
1112
1113 return retval;
1114}
1115
1116/******************************************************************************
1117 *
1118 * config_sensor_410
1119 *
1120 *****************************************************************************/
1121static int config_sensor_410(struct camera_data *cam,
1122 int req_width, int req_height)
1123{
1124 struct cpia2_command cmd;
1125 int i = 0;
1126 int image_size;
1127 int image_type;
1128 int width = req_width;
1129 int height = req_height;
1130
1131 /***
1132 * Make sure size doesn't exceed CIF.
1133 ***/
1134 if (width > STV_IMAGE_CIF_COLS)
1135 width = STV_IMAGE_CIF_COLS;
1136 if (height > STV_IMAGE_CIF_ROWS)
1137 height = STV_IMAGE_CIF_ROWS;
1138
1139 image_size = cpia2_match_video_size(width, height);
1140
1141 DBG("Config 410: width = %d, height = %d\n", width, height);
1142 DBG("Image size returned is %d\n", image_size);
1143 if (image_size >= 0) {
1144 set_vw_size(cam, image_size);
1145 width = cam->params.roi.width;
1146 height = cam->params.roi.height;
1147
1148 DBG("After set_vw_size(), width = %d, height = %d\n",
1149 width, height);
1150 if (width <= 176 && height <= 144) {
1151 DBG("image type = VIDEOSIZE_QCIF\n");
1152 image_type = VIDEOSIZE_QCIF;
1153 }
1154 else if (width <= 320 && height <= 240) {
1155 DBG("image type = VIDEOSIZE_QVGA\n");
1156 image_type = VIDEOSIZE_QVGA;
1157 }
1158 else {
1159 DBG("image type = VIDEOSIZE_CIF\n");
1160 image_type = VIDEOSIZE_CIF;
1161 }
1162 } else {
1163 ERR("ConfigSensor410 failed\n");
1164 return -EINVAL;
1165 }
1166
1167 cmd.req_mode = CAMERAACCESS_TYPE_RANDOM | CAMERAACCESS_VC;
1168 cmd.direction = TRANSFER_WRITE;
1169
1170 /* VC Format */
1171 cmd.buffer.registers[i].index = CPIA2_VC_VC_FORMAT;
1172 if (image_type == VIDEOSIZE_CIF) {
1173 cmd.buffer.registers[i++].value =
1174 (u8) (CPIA2_VC_VC_FORMAT_UFIRST |
1175 CPIA2_VC_VC_FORMAT_SHORTLINE);
1176 } else {
1177 cmd.buffer.registers[i++].value =
1178 (u8) CPIA2_VC_VC_FORMAT_UFIRST;
1179 }
1180
1181 /* VC Clocks */
1182 cmd.buffer.registers[i].index = CPIA2_VC_VC_CLOCKS;
1183 if (image_type == VIDEOSIZE_QCIF) {
1184 if (cam->params.pnp_id.device_type == DEVICE_STV_672) {
1185 cmd.buffer.registers[i++].value=
1186 (u8)(CPIA2_VC_VC_672_CLOCKS_CIF_DIV_BY_3 |
1187 CPIA2_VC_VC_672_CLOCKS_SCALING |
1188 CPIA2_VC_VC_CLOCKS_LOGDIV2);
1189 DBG("VC_Clocks (0xc4) should be B\n");
1190 }
1191 else {
1192 cmd.buffer.registers[i++].value=
1193 (u8)(CPIA2_VC_VC_676_CLOCKS_CIF_DIV_BY_3 |
1194 CPIA2_VC_VC_CLOCKS_LOGDIV2);
1195 }
1196 } else {
1197 if (cam->params.pnp_id.device_type == DEVICE_STV_672) {
1198 cmd.buffer.registers[i++].value =
1199 (u8) (CPIA2_VC_VC_672_CLOCKS_CIF_DIV_BY_3 |
1200 CPIA2_VC_VC_CLOCKS_LOGDIV0);
1201 }
1202 else {
1203 cmd.buffer.registers[i++].value =
1204 (u8) (CPIA2_VC_VC_676_CLOCKS_CIF_DIV_BY_3 |
1205 CPIA2_VC_VC_676_CLOCKS_SCALING |
1206 CPIA2_VC_VC_CLOCKS_LOGDIV0);
1207 }
1208 }
1209 DBG("VC_Clocks (0xc4) = 0x%0X\n", cmd.buffer.registers[i-1].value);
1210
1211 /* Input reqWidth from VC */
1212 cmd.buffer.registers[i].index = CPIA2_VC_VC_IHSIZE_LO;
1213 if (image_type == VIDEOSIZE_QCIF)
1214 cmd.buffer.registers[i++].value =
1215 (u8) (STV_IMAGE_QCIF_COLS / 4);
1216 else
1217 cmd.buffer.registers[i++].value =
1218 (u8) (STV_IMAGE_CIF_COLS / 4);
1219
1220 /* Timings */
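	/* The XLIM/YLIM limits appear to be split across HI/LO register pairs,
	 * so the CIF X limit below works out to 0x01A0 = 416 and the QCIF
	 * limit to 0x00D0 = 208. */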
1221 cmd.buffer.registers[i].index = CPIA2_VC_VC_XLIM_HI;
1222 if (image_type == VIDEOSIZE_QCIF)
1223 cmd.buffer.registers[i++].value = (u8) 0;
1224 else
1225 cmd.buffer.registers[i++].value = (u8) 1;
1226
1227 cmd.buffer.registers[i].index = CPIA2_VC_VC_XLIM_LO;
1228 if (image_type == VIDEOSIZE_QCIF)
1229 cmd.buffer.registers[i++].value = (u8) 208;
1230 else
1231 cmd.buffer.registers[i++].value = (u8) 160;
1232
1233 cmd.buffer.registers[i].index = CPIA2_VC_VC_YLIM_HI;
1234 if (image_type == VIDEOSIZE_QCIF)
1235 cmd.buffer.registers[i++].value = (u8) 0;
1236 else
1237 cmd.buffer.registers[i++].value = (u8) 1;
1238
1239 cmd.buffer.registers[i].index = CPIA2_VC_VC_YLIM_LO;
1240 if (image_type == VIDEOSIZE_QCIF)
1241 cmd.buffer.registers[i++].value = (u8) 160;
1242 else
1243 cmd.buffer.registers[i++].value = (u8) 64;
1244
1245 /* Output Image Size */
1246 cmd.buffer.registers[i].index = CPIA2_VC_VC_OHSIZE;
1247 cmd.buffer.registers[i++].value = cam->params.roi.width / 4;
1248
1249 cmd.buffer.registers[i].index = CPIA2_VC_VC_OVSIZE;
1250 cmd.buffer.registers[i++].value = cam->params.roi.height / 4;
1251
1252 /* Cropping */
1253 cmd.buffer.registers[i].index = CPIA2_VC_VC_HCROP;
1254 if (image_type == VIDEOSIZE_QCIF)
1255 cmd.buffer.registers[i++].value =
1256 (u8) (((STV_IMAGE_QCIF_COLS / 4) - (width / 4)) / 2);
1257 else
1258 cmd.buffer.registers[i++].value =
1259 (u8) (((STV_IMAGE_CIF_COLS / 4) - (width / 4)) / 2);
1260
1261 cmd.buffer.registers[i].index = CPIA2_VC_VC_VCROP;
1262 if (image_type == VIDEOSIZE_QCIF)
1263 cmd.buffer.registers[i++].value =
1264 (u8) (((STV_IMAGE_QCIF_ROWS / 4) - (height / 4)) / 2);
1265 else
1266 cmd.buffer.registers[i++].value =
1267 (u8) (((STV_IMAGE_CIF_ROWS / 4) - (height / 4)) / 2);
1268
1269 /* Scaling registers (defaults) */
1270 cmd.buffer.registers[i].index = CPIA2_VC_VC_HPHASE;
1271 cmd.buffer.registers[i++].value = (u8) 0;
1272
1273 cmd.buffer.registers[i].index = CPIA2_VC_VC_VPHASE;
1274 cmd.buffer.registers[i++].value = (u8) 0;
1275
1276 cmd.buffer.registers[i].index = CPIA2_VC_VC_HISPAN;
1277 cmd.buffer.registers[i++].value = (u8) 31;
1278
1279 cmd.buffer.registers[i].index = CPIA2_VC_VC_VISPAN;
1280 cmd.buffer.registers[i++].value = (u8) 31;
1281
1282 cmd.buffer.registers[i].index = CPIA2_VC_VC_HICROP;
1283 cmd.buffer.registers[i++].value = (u8) 0;
1284
1285 cmd.buffer.registers[i].index = CPIA2_VC_VC_VICROP;
1286 cmd.buffer.registers[i++].value = (u8) 0;
1287
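	/* HFRACT/VFRACT pack the scaling numerator in the high nibble and the
	 * denominator in the low nibble (see CPIA2_VC_VC_HFRACT_NUM_MASK and
	 * CPIA2_VC_VC_HFRACT_DEN_MASK), so 0x81 means 8/1, i.e. no scaling. */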
1288 cmd.buffer.registers[i].index = CPIA2_VC_VC_HFRACT;
1289 cmd.buffer.registers[i++].value = (u8) 0x81; /* = 8/1 = 8 (HIBYTE/LOBYTE) */
1290
1291 cmd.buffer.registers[i].index = CPIA2_VC_VC_VFRACT;
1292 cmd.buffer.registers[i++].value = (u8) 0x81; /* = 8/1 = 8 (HIBYTE/LOBYTE) */
1293
1294 cmd.reg_count = i;
1295
1296 cpia2_send_command(cam, &cmd);
1297
1298 return i;
1299}
1300
1301
1302/******************************************************************************
1303 *
1304 * config_sensor_500
1305 *
1306 *****************************************************************************/
1307static int config_sensor_500(struct camera_data *cam,
1308 int req_width, int req_height)
1309{
1310 struct cpia2_command cmd;
1311 int i = 0;
1312 int image_size = VIDEOSIZE_CIF;
1313 int image_type = VIDEOSIZE_VGA;
1314 int width = req_width;
1315 int height = req_height;
1316 unsigned int device = cam->params.pnp_id.device_type;
1317
1318 image_size = cpia2_match_video_size(width, height);
1319
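	/* Pick the smallest sensor window (QCIF/QVGA/CIF/VGA) that still covers
	 * the requested size; image_size then selects the matching output ROI. */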
1320 if (width > STV_IMAGE_CIF_COLS || height > STV_IMAGE_CIF_ROWS)
1321 image_type = VIDEOSIZE_VGA;
1322 else if (width > STV_IMAGE_QVGA_COLS || height > STV_IMAGE_QVGA_ROWS)
1323 image_type = VIDEOSIZE_CIF;
1324 else if (width > STV_IMAGE_QCIF_COLS || height > STV_IMAGE_QCIF_ROWS)
1325 image_type = VIDEOSIZE_QVGA;
1326 else
1327 image_type = VIDEOSIZE_QCIF;
1328
1329 if (image_size >= 0) {
1330 set_vw_size(cam, image_size);
1331 width = cam->params.roi.width;
1332 height = cam->params.roi.height;
1333 } else {
1334 ERR("ConfigSensor500 failed\n");
1335 return -EINVAL;
1336 }
1337
1338 DBG("image_size = %d, width = %d, height = %d, type = %d\n",
1339 image_size, width, height, image_type);
1340
1341 cmd.req_mode = CAMERAACCESS_TYPE_RANDOM | CAMERAACCESS_VC;
1342 cmd.direction = TRANSFER_WRITE;
1343 i = 0;
1344
1345 /* VC Format */
1346 cmd.buffer.registers[i].index = CPIA2_VC_VC_FORMAT;
1347 cmd.buffer.registers[i].value = (u8) CPIA2_VC_VC_FORMAT_UFIRST;
1348 if (image_type == VIDEOSIZE_QCIF)
1349 cmd.buffer.registers[i].value |= (u8) CPIA2_VC_VC_FORMAT_DECIMATING;
1350 i++;
1351
1352 /* VC Clocks */
1353 cmd.buffer.registers[i].index = CPIA2_VC_VC_CLOCKS;
1354 if (device == DEVICE_STV_672) {
1355 if (image_type == VIDEOSIZE_VGA)
1356 cmd.buffer.registers[i].value =
1357 (u8)CPIA2_VC_VC_CLOCKS_LOGDIV1;
1358 else
1359 cmd.buffer.registers[i].value =
1360 (u8)(CPIA2_VC_VC_672_CLOCKS_SCALING |
1361 CPIA2_VC_VC_CLOCKS_LOGDIV3);
1362 } else {
1363 if (image_type == VIDEOSIZE_VGA)
1364 cmd.buffer.registers[i].value =
1365 (u8)CPIA2_VC_VC_CLOCKS_LOGDIV0;
1366 else
1367 cmd.buffer.registers[i].value =
1368 (u8)(CPIA2_VC_VC_676_CLOCKS_SCALING |
1369 CPIA2_VC_VC_CLOCKS_LOGDIV2);
1370 }
1371 i++;
1372
1373 DBG("VC_CLOCKS = 0x%X\n", cmd.buffer.registers[i-1].value);
1374
1375 /* Input width from VP */
1376 cmd.buffer.registers[i].index = CPIA2_VC_VC_IHSIZE_LO;
1377 if (image_type == VIDEOSIZE_VGA)
1378 cmd.buffer.registers[i].value =
1379 (u8) (STV_IMAGE_VGA_COLS / 4);
1380 else
1381 cmd.buffer.registers[i].value =
1382 (u8) (STV_IMAGE_QVGA_COLS / 4);
1383 i++;
1384 DBG("Input width = %d\n", cmd.buffer.registers[i-1].value);
1385
1386 /* Timings */
1387 cmd.buffer.registers[i].index = CPIA2_VC_VC_XLIM_HI;
1388 if (image_type == VIDEOSIZE_VGA)
1389 cmd.buffer.registers[i++].value = (u8) 2;
1390 else
1391 cmd.buffer.registers[i++].value = (u8) 1;
1392
1393 cmd.buffer.registers[i].index = CPIA2_VC_VC_XLIM_LO;
1394 if (image_type == VIDEOSIZE_VGA)
1395 cmd.buffer.registers[i++].value = (u8) 250;
1396 else if (image_type == VIDEOSIZE_QVGA)
1397 cmd.buffer.registers[i++].value = (u8) 125;
1398 else
1399 cmd.buffer.registers[i++].value = (u8) 160;
1400
1401 cmd.buffer.registers[i].index = CPIA2_VC_VC_YLIM_HI;
1402 if (image_type == VIDEOSIZE_VGA)
1403 cmd.buffer.registers[i++].value = (u8) 2;
1404 else
1405 cmd.buffer.registers[i++].value = (u8) 1;
1406
1407 cmd.buffer.registers[i].index = CPIA2_VC_VC_YLIM_LO;
1408 if (image_type == VIDEOSIZE_VGA)
1409 cmd.buffer.registers[i++].value = (u8) 12;
1410 else if (image_type == VIDEOSIZE_QVGA)
1411 cmd.buffer.registers[i++].value = (u8) 64;
1412 else
1413 cmd.buffer.registers[i++].value = (u8) 6;
1414
1415 /* Output Image Size */
1416 cmd.buffer.registers[i].index = CPIA2_VC_VC_OHSIZE;
1417 if (image_type == VIDEOSIZE_QCIF)
1418 cmd.buffer.registers[i++].value = STV_IMAGE_CIF_COLS / 4;
1419 else
1420 cmd.buffer.registers[i++].value = width / 4;
1421
1422 cmd.buffer.registers[i].index = CPIA2_VC_VC_OVSIZE;
1423 if (image_type == VIDEOSIZE_QCIF)
1424 cmd.buffer.registers[i++].value = STV_IMAGE_CIF_ROWS / 4;
1425 else
1426 cmd.buffer.registers[i++].value = height / 4;
1427
1428 /* Cropping */
1429 cmd.buffer.registers[i].index = CPIA2_VC_VC_HCROP;
1430 if (image_type == VIDEOSIZE_VGA)
1431 cmd.buffer.registers[i++].value =
1432 (u8) (((STV_IMAGE_VGA_COLS / 4) - (width / 4)) / 2);
1433 else if (image_type == VIDEOSIZE_QVGA)
1434 cmd.buffer.registers[i++].value =
1435 (u8) (((STV_IMAGE_QVGA_COLS / 4) - (width / 4)) / 2);
1436 else if (image_type == VIDEOSIZE_CIF)
1437 cmd.buffer.registers[i++].value =
1438 (u8) (((STV_IMAGE_CIF_COLS / 4) - (width / 4)) / 2);
1439 else /*if (image_type == VIDEOSIZE_QCIF)*/
1440 cmd.buffer.registers[i++].value =
1441 (u8) (((STV_IMAGE_QCIF_COLS / 4) - (width / 4)) / 2);
1442
1443 cmd.buffer.registers[i].index = CPIA2_VC_VC_VCROP;
1444 if (image_type == VIDEOSIZE_VGA)
1445 cmd.buffer.registers[i++].value =
1446 (u8) (((STV_IMAGE_VGA_ROWS / 4) - (height / 4)) / 2);
1447 else if (image_type == VIDEOSIZE_QVGA)
1448 cmd.buffer.registers[i++].value =
1449 (u8) (((STV_IMAGE_QVGA_ROWS / 4) - (height / 4)) / 2);
1450 else if (image_type == VIDEOSIZE_CIF)
1451 cmd.buffer.registers[i++].value =
1452 (u8) (((STV_IMAGE_CIF_ROWS / 4) - (height / 4)) / 2);
1453 else /*if (image_type == VIDEOSIZE_QCIF)*/
1454 cmd.buffer.registers[i++].value =
1455 (u8) (((STV_IMAGE_QCIF_ROWS / 4) - (height / 4)) / 2);
1456
1457 /* Scaling registers (defaults) */
1458 cmd.buffer.registers[i].index = CPIA2_VC_VC_HPHASE;
1459 if (image_type == VIDEOSIZE_CIF || image_type == VIDEOSIZE_QCIF)
1460 cmd.buffer.registers[i++].value = (u8) 36;
1461 else
1462 cmd.buffer.registers[i++].value = (u8) 0;
1463
1464 cmd.buffer.registers[i].index = CPIA2_VC_VC_VPHASE;
1465 if (image_type == VIDEOSIZE_CIF || image_type == VIDEOSIZE_QCIF)
1466 cmd.buffer.registers[i++].value = (u8) 32;
1467 else
1468 cmd.buffer.registers[i++].value = (u8) 0;
1469
1470 cmd.buffer.registers[i].index = CPIA2_VC_VC_HISPAN;
1471 if (image_type == VIDEOSIZE_CIF || image_type == VIDEOSIZE_QCIF)
1472 cmd.buffer.registers[i++].value = (u8) 26;
1473 else
1474 cmd.buffer.registers[i++].value = (u8) 31;
1475
1476 cmd.buffer.registers[i].index = CPIA2_VC_VC_VISPAN;
1477 if (image_type == VIDEOSIZE_CIF || image_type == VIDEOSIZE_QCIF)
1478 cmd.buffer.registers[i++].value = (u8) 21;
1479 else
1480 cmd.buffer.registers[i++].value = (u8) 31;
1481
1482 cmd.buffer.registers[i].index = CPIA2_VC_VC_HICROP;
1483 cmd.buffer.registers[i++].value = (u8) 0;
1484
1485 cmd.buffer.registers[i].index = CPIA2_VC_VC_VICROP;
1486 cmd.buffer.registers[i++].value = (u8) 0;
1487
1488 cmd.buffer.registers[i].index = CPIA2_VC_VC_HFRACT;
1489 if (image_type == VIDEOSIZE_CIF || image_type == VIDEOSIZE_QCIF)
1490 cmd.buffer.registers[i++].value = (u8) 0x2B; /* 2/11 */
1491 else
1492 cmd.buffer.registers[i++].value = (u8) 0x81; /* 8/1 */
1493
1494 cmd.buffer.registers[i].index = CPIA2_VC_VC_VFRACT;
1495 if (image_type == VIDEOSIZE_CIF || image_type == VIDEOSIZE_QCIF)
1496 cmd.buffer.registers[i++].value = (u8) 0x13; /* 1/3 */
1497 else
1498 cmd.buffer.registers[i++].value = (u8) 0x81; /* 8/1 */
1499
1500 cmd.reg_count = i;
1501
1502 cpia2_send_command(cam, &cmd);
1503
1504 return i;
1505}
1506
1507
1508/******************************************************************************
1509 *
1510 * set_all_properties
1511 *
1512 * This sets all user changeable properties to the values in cam->params.
1513 *****************************************************************************/
1514int set_all_properties(struct camera_data *cam)
1515{
1516 /**
1517 * Don't set target_kb here, it will be set later.
1518 * framerate and user_mode were already set (set_default_user_mode).
1519 **/
1520
1521 cpia2_set_color_params(cam);
1522
1523 cpia2_usb_change_streaming_alternate(cam,
1524 cam->params.camera_state.stream_mode);
1525
1526 cpia2_do_command(cam, CPIA2_CMD_SET_USER_EFFECTS, TRANSFER_WRITE,
1527 cam->params.vp_params.user_effects);
1528
1529 cpia2_set_flicker_mode(cam,
1530 cam->params.flicker_control.flicker_mode_req);
1531
1532 cpia2_do_command(cam,
1533 CPIA2_CMD_SET_VC_MP_GPIO_DIRECTION,
1534 TRANSFER_WRITE, cam->params.vp_params.gpio_direction);
1535 cpia2_do_command(cam, CPIA2_CMD_SET_VC_MP_GPIO_DATA, TRANSFER_WRITE,
1536 cam->params.vp_params.gpio_data);
1537
1538 wake_system(cam);
1539
1540 set_lowlight_boost(cam);
1541
1542 return 0;
1543}
1544
1545/******************************************************************************
1546 *
1547 * cpia2_save_camera_state
1548 *
1549 *****************************************************************************/
1550void cpia2_save_camera_state(struct camera_data *cam)
1551{
1552 get_color_params(cam);
1553 cpia2_do_command(cam, CPIA2_CMD_GET_USER_EFFECTS, TRANSFER_READ, 0);
1554 cpia2_do_command(cam, CPIA2_CMD_GET_VC_MP_GPIO_DIRECTION, TRANSFER_READ,
1555 0);
1556 cpia2_do_command(cam, CPIA2_CMD_GET_VC_MP_GPIO_DATA, TRANSFER_READ, 0);
1557 /* Don't get framerate or target_kb. Trust the values we already have */
1558}
1559
1560/******************************************************************************
1561 *
1562 * get_color_params
1563 *
1564 *****************************************************************************/
1565void get_color_params(struct camera_data *cam)
1566{
1567 cpia2_do_command(cam, CPIA2_CMD_GET_VP_BRIGHTNESS, TRANSFER_READ, 0);
1568 cpia2_do_command(cam, CPIA2_CMD_GET_VP_SATURATION, TRANSFER_READ, 0);
1569 cpia2_do_command(cam, CPIA2_CMD_GET_CONTRAST, TRANSFER_READ, 0);
1570}
1571
1572/******************************************************************************
1573 *
1574 * cpia2_set_color_params
1575 *
1576 *****************************************************************************/
1577void cpia2_set_color_params(struct camera_data *cam)
1578{
1579 DBG("Setting color params\n");
1580 cpia2_set_brightness(cam, cam->params.color_params.brightness);
1581 cpia2_set_contrast(cam, cam->params.color_params.contrast);
1582 cpia2_set_saturation(cam, cam->params.color_params.saturation);
1583}
1584
1585/******************************************************************************
1586 *
1587 * cpia2_set_flicker_mode
1588 *
1589 *****************************************************************************/
1590int cpia2_set_flicker_mode(struct camera_data *cam, int mode)
1591{
1592 unsigned char cam_reg;
1593 int err = 0;
1594
1595 if(cam->params.pnp_id.device_type != DEVICE_STV_672)
1596 return -EINVAL;
1597
1598 /* Set the appropriate bits in FLICKER_MODES, preserving the rest */
1599 if((err = cpia2_do_command(cam, CPIA2_CMD_GET_FLICKER_MODES,
1600 TRANSFER_READ, 0)))
1601 return err;
1602 cam_reg = cam->params.flicker_control.cam_register;
1603
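	/* NEVER_FLICKER sets the never-flicker bit and clears 50HZ; the 50 Hz
	 * and 60 Hz modes clear never-flicker and select or clear 50HZ. */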
1604 switch(mode) {
1605 case NEVER_FLICKER:
1606 cam_reg |= CPIA2_VP_FLICKER_MODES_NEVER_FLICKER;
1607 cam_reg &= ~CPIA2_VP_FLICKER_MODES_50HZ;
1608 break;
1609 case FLICKER_60:
1610 cam_reg &= ~CPIA2_VP_FLICKER_MODES_NEVER_FLICKER;
1611 cam_reg &= ~CPIA2_VP_FLICKER_MODES_50HZ;
1612 break;
1613 case FLICKER_50:
1614 cam_reg &= ~CPIA2_VP_FLICKER_MODES_NEVER_FLICKER;
1615 cam_reg |= CPIA2_VP_FLICKER_MODES_50HZ;
1616 break;
1617 default:
1618 return -EINVAL;
1619 }
1620
1621 if((err = cpia2_do_command(cam, CPIA2_CMD_SET_FLICKER_MODES,
1622 TRANSFER_WRITE, cam_reg)))
1623 return err;
1624
1625 /* Set the appropriate bits in EXP_MODES, preserving the rest */
1626 if((err = cpia2_do_command(cam, CPIA2_CMD_GET_VP_EXP_MODES,
1627 TRANSFER_READ, 0)))
1628 return err;
1629 cam_reg = cam->params.vp_params.exposure_modes;
1630
1631 if (mode == NEVER_FLICKER) {
1632 cam_reg |= CPIA2_VP_EXPOSURE_MODES_INHIBIT_FLICKER;
1633 } else {
1634 cam_reg &= ~CPIA2_VP_EXPOSURE_MODES_INHIBIT_FLICKER;
1635 }
1636
1637 if((err = cpia2_do_command(cam, CPIA2_CMD_SET_VP_EXP_MODES,
1638 TRANSFER_WRITE, cam_reg)))
1639 return err;
1640
1641 if((err = cpia2_do_command(cam, CPIA2_CMD_REHASH_VP4,
1642 TRANSFER_WRITE, 1)))
1643 return err;
1644
1645 switch(mode) {
1646 case NEVER_FLICKER:
1647 cam->params.flicker_control.flicker_mode_req = mode;
1648 break;
1649 case FLICKER_60:
1650 cam->params.flicker_control.flicker_mode_req = mode;
1651 cam->params.flicker_control.mains_frequency = 60;
1652 break;
1653 case FLICKER_50:
1654 cam->params.flicker_control.flicker_mode_req = mode;
1655 cam->params.flicker_control.mains_frequency = 50;
1656 break;
1657 default:
1658 err = -EINVAL;
1659 }
1660
1661 return err;
1662}
1663
1664/******************************************************************************
1665 *
1666 * cpia2_set_property_flip
1667 *
1668 *****************************************************************************/
1669void cpia2_set_property_flip(struct camera_data *cam, int prop_val)
1670{
1671 unsigned char cam_reg;
1672
1673 cpia2_do_command(cam, CPIA2_CMD_GET_USER_EFFECTS, TRANSFER_READ, 0);
1674 cam_reg = cam->params.vp_params.user_effects;
1675
1676 if (prop_val)
1677 {
1678 cam_reg |= CPIA2_VP_USER_EFFECTS_FLIP;
1679 }
1680 else
1681 {
1682 cam_reg &= ~CPIA2_VP_USER_EFFECTS_FLIP;
1683 }
1684 cpia2_do_command(cam, CPIA2_CMD_SET_USER_EFFECTS, TRANSFER_WRITE,
1685 cam_reg);
1686}
1687
1688/******************************************************************************
1689 *
1690 * cpia2_set_property_mirror
1691 *
1692 *****************************************************************************/
1693void cpia2_set_property_mirror(struct camera_data *cam, int prop_val)
1694{
1695 unsigned char cam_reg;
1696
1697 cpia2_do_command(cam, CPIA2_CMD_GET_USER_EFFECTS, TRANSFER_READ, 0);
1698 cam_reg = cam->params.vp_params.user_effects;
1699
1700 if (prop_val)
1701 {
1702 cam_reg |= CPIA2_VP_USER_EFFECTS_MIRROR;
1703 }
1704 else
1705 {
1706 cam_reg &= ~CPIA2_VP_USER_EFFECTS_MIRROR;
1707 }
1708 cpia2_do_command(cam, CPIA2_CMD_SET_USER_EFFECTS, TRANSFER_WRITE,
1709 cam_reg);
1710}
1711
1712/******************************************************************************
1713 *
1714 * cpia2_set_target_kb
1715 *
1716 * The new Target KB is set in cam->params.vc_params.target_kb and
1717 * activates on reset.
1718 *****************************************************************************/
1719
1720int cpia2_set_target_kb(struct camera_data *cam, unsigned char value)
1721{
1722 DBG("Requested target_kb = %d\n", value);
1723 if (value != cam->params.vc_params.target_kb) {
1724
1725 cpia2_usb_stream_pause(cam);
1726
1727 /* reset camera for new target_kb */
1728 cam->params.vc_params.target_kb = value;
1729 cpia2_reset_camera(cam);
1730
1731 cpia2_usb_stream_resume(cam);
1732 }
1733
1734 return 0;
1735}
1736
1737/******************************************************************************
1738 *
1739 * cpia2_set_gpio
1740 *
1741 *****************************************************************************/
1742int cpia2_set_gpio(struct camera_data *cam, unsigned char setting)
1743{
1744 int ret;
1745
1746 /* Set the microport direction (register 0x90, should be defined
1747 * already) to 1 (user output), and set the microport data (0x91) to
1748 * the value in the ioctl argument.
1749 */
1750
1751 ret = cpia2_do_command(cam,
1752 CPIA2_CMD_SET_VC_MP_GPIO_DIRECTION,
1753 CPIA2_VC_MP_DIR_OUTPUT,
1754 255);
1755 if (ret < 0)
1756 return ret;
1757 cam->params.vp_params.gpio_direction = 255;
1758
1759 ret = cpia2_do_command(cam,
1760 CPIA2_CMD_SET_VC_MP_GPIO_DATA,
1761 CPIA2_VC_MP_DIR_OUTPUT,
1762 setting);
1763 if (ret < 0)
1764 return ret;
1765 cam->params.vp_params.gpio_data = setting;
1766
1767 return 0;
1768}
1769
1770/******************************************************************************
1771 *
1772 * cpia2_set_fps
1773 *
1774 *****************************************************************************/
1775int cpia2_set_fps(struct camera_data *cam, int framerate)
1776{
1777 int retval;
1778
1779 switch(framerate) {
1780 case CPIA2_VP_FRAMERATE_30:
1781 case CPIA2_VP_FRAMERATE_25:
1782 if(cam->params.pnp_id.device_type == DEVICE_STV_672 &&
1783 cam->params.version.sensor_flags ==
1784 CPIA2_VP_SENSOR_FLAGS_500) {
1785 return -EINVAL;
1786 }
1787 /* Fall through */
1788 case CPIA2_VP_FRAMERATE_15:
1789 case CPIA2_VP_FRAMERATE_12_5:
1790 case CPIA2_VP_FRAMERATE_7_5:
1791 case CPIA2_VP_FRAMERATE_6_25:
1792 break;
1793 default:
1794 return -EINVAL;
1795 }
1796
1797 if (cam->params.pnp_id.device_type == DEVICE_STV_672 &&
1798 framerate == CPIA2_VP_FRAMERATE_15)
1799 framerate = 0; /* Work around bug in VP4 */
1800
1801 retval = cpia2_do_command(cam,
1802 CPIA2_CMD_FRAMERATE_REQ,
1803 TRANSFER_WRITE,
1804 framerate);
1805
1806 if(retval == 0)
1807 cam->params.vp_params.frame_rate = framerate;
1808
1809 return retval;
1810}
1811
1812/******************************************************************************
1813 *
1814 * cpia2_set_brightness
1815 *
1816 *****************************************************************************/
1817void cpia2_set_brightness(struct camera_data *cam, unsigned char value)
1818{
1819 /***
1820 * Don't let the register be set to zero - bug in VP4 - flash of full
1821 * brightness
1822 ***/
1823 if (cam->params.pnp_id.device_type == DEVICE_STV_672 && value == 0)
1824 value++;
1825 DBG("Setting brightness to %d (0x%0x)\n", value, value);
1826 cpia2_do_command(cam,CPIA2_CMD_SET_VP_BRIGHTNESS, TRANSFER_WRITE,value);
1827}
1828
1829/******************************************************************************
1830 *
1831 * cpia2_set_contrast
1832 *
1833 *****************************************************************************/
1834void cpia2_set_contrast(struct camera_data *cam, unsigned char value)
1835{
1836 DBG("Setting contrast to %d (0x%0x)\n", value, value);
1837 cam->params.color_params.contrast = value;
1838 cpia2_do_command(cam, CPIA2_CMD_SET_CONTRAST, TRANSFER_WRITE, value);
1839}
1840
1841/******************************************************************************
1842 *
1843 * cpia2_set_saturation
1844 *
1845 *****************************************************************************/
1846void cpia2_set_saturation(struct camera_data *cam, unsigned char value)
1847{
1848 DBG("Setting saturation to %d (0x%0x)\n", value, value);
1849 cam->params.color_params.saturation = value;
1850 cpia2_do_command(cam,CPIA2_CMD_SET_VP_SATURATION, TRANSFER_WRITE,value);
1851}
1852
1853/******************************************************************************
1854 *
1855 * wake_system
1856 *
1857 *****************************************************************************/
1858void wake_system(struct camera_data *cam)
1859{
1860 cpia2_do_command(cam, CPIA2_CMD_SET_WAKEUP, TRANSFER_WRITE, 0);
1861}
1862
1863/******************************************************************************
1864 *
1865 * set_lowlight_boost
1866 *
1867 * Valid only for the 500 sensor on an STV672
1868 *****************************************************************************/
1869void set_lowlight_boost(struct camera_data *cam)
1870{
1871 struct cpia2_command cmd;
1872
1873 if (cam->params.pnp_id.device_type != DEVICE_STV_672 ||
1874 cam->params.version.sensor_flags != CPIA2_VP_SENSOR_FLAGS_500)
1875 return;
1876
1877 cmd.direction = TRANSFER_WRITE;
1878 cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
1879 cmd.reg_count = 3;
1880 cmd.start = CPIA2_VP_RAM_ADDR_H;
1881
1882 cmd.buffer.block_data[0] = 0; /* High byte of address to write to */
1883 cmd.buffer.block_data[1] = 0x59; /* Low byte of address to write to */
1884 cmd.buffer.block_data[2] = 0; /* High byte of data to write */
1885
1886 cpia2_send_command(cam, &cmd);
1887
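	/* Now write the data byte for VP RAM address 0x0059: 0x02 when the
	 * boost is requested, 0x06 otherwise (see below). */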
1888 if (cam->params.vp_params.lowlight_boost) {
1889 cmd.buffer.block_data[0] = 0x02; /* Low byte data to write */
1890 } else {
1891 cmd.buffer.block_data[0] = 0x06;
1892 }
1893 cmd.start = CPIA2_VP_RAM_DATA;
1894 cmd.reg_count = 1;
1895 cpia2_send_command(cam, &cmd);
1896
1897 /* Rehash the VP4 values */
1898 cpia2_do_command(cam, CPIA2_CMD_REHASH_VP4, TRANSFER_WRITE, 1);
1899}
1900
1901/******************************************************************************
1902 *
1903 * cpia2_set_format
1904 *
1905 * Assumes that new size is already set in param struct.
1906 *****************************************************************************/
1907void cpia2_set_format(struct camera_data *cam)
1908{
1909 cam->flush = true;
1910
1911 cpia2_usb_stream_pause(cam);
1912
1913 /* reset camera to new size */
1914 cpia2_set_low_power(cam);
1915 cpia2_reset_camera(cam);
1916 cam->flush = false;
1917
1918 cpia2_dbg_dump_registers(cam);
1919
1920 cpia2_usb_stream_resume(cam);
1921}
1922
1923/******************************************************************************
1924 *
1925 * cpia2_dbg_dump_registers
1926 *
1927 *****************************************************************************/
1928void cpia2_dbg_dump_registers(struct camera_data *cam)
1929{
1930#ifdef _CPIA2_DEBUG_
1931 struct cpia2_command cmd;
1932
1933 if (!(debugs_on & DEBUG_DUMP_REGS))
1934 return;
1935
1936 cmd.direction = TRANSFER_READ;
1937
1938 /* Start with bank 0 (SYSTEM) */
1939 cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_SYSTEM;
1940 cmd.reg_count = 3;
1941 cmd.start = 0;
1942 cpia2_send_command(cam, &cmd);
1943 printk(KERN_DEBUG "System Device Hi = 0x%X\n",
1944 cmd.buffer.block_data[0]);
1945 printk(KERN_DEBUG "System Device Lo = 0x%X\n",
1946 cmd.buffer.block_data[1]);
1947 printk(KERN_DEBUG "System_system control = 0x%X\n",
1948 cmd.buffer.block_data[2]);
1949
1950 /* Bank 1 (VC) */
1951 cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC;
1952 cmd.reg_count = 4;
1953 cmd.start = 0x80;
1954 cpia2_send_command(cam, &cmd);
1955 printk(KERN_DEBUG "ASIC_ID = 0x%X\n",
1956 cmd.buffer.block_data[0]);
1957 printk(KERN_DEBUG "ASIC_REV = 0x%X\n",
1958 cmd.buffer.block_data[1]);
1959 printk(KERN_DEBUG "PW_CONTRL = 0x%X\n",
1960 cmd.buffer.block_data[2]);
1961 printk(KERN_DEBUG "WAKEUP = 0x%X\n",
1962 cmd.buffer.block_data[3]);
1963
1964 cmd.start = 0xA0; /* ST_CTRL */
1965 cmd.reg_count = 1;
1966 cpia2_send_command(cam, &cmd);
1967 printk(KERN_DEBUG "Stream ctrl = 0x%X\n",
1968 cmd.buffer.block_data[0]);
1969
1970 cmd.start = 0xA4; /* Stream status */
1971 cpia2_send_command(cam, &cmd);
1972 printk(KERN_DEBUG "Stream status = 0x%X\n",
1973 cmd.buffer.block_data[0]);
1974
1975 cmd.start = 0xA8; /* USB status */
1976 cmd.reg_count = 3;
1977 cpia2_send_command(cam, &cmd);
1978 printk(KERN_DEBUG "USB_CTRL = 0x%X\n",
1979 cmd.buffer.block_data[0]);
1980 printk(KERN_DEBUG "USB_STRM = 0x%X\n",
1981 cmd.buffer.block_data[1]);
1982 printk(KERN_DEBUG "USB_STATUS = 0x%X\n",
1983 cmd.buffer.block_data[2]);
1984
1985 cmd.start = 0xAF; /* USB settings */
1986 cmd.reg_count = 1;
1987 cpia2_send_command(cam, &cmd);
1988 printk(KERN_DEBUG "USB settings = 0x%X\n",
1989 cmd.buffer.block_data[0]);
1990
1991 cmd.start = 0xC0; /* VC stuff */
1992 cmd.reg_count = 26;
1993 cpia2_send_command(cam, &cmd);
1994 printk(KERN_DEBUG "VC Control = 0x%0X\n",
1995 cmd.buffer.block_data[0]);
1996 printk(KERN_DEBUG "VC Format = 0x%0X\n",
1997 cmd.buffer.block_data[3]);
1998 printk(KERN_DEBUG "VC Clocks = 0x%0X\n",
1999 cmd.buffer.block_data[4]);
2000 printk(KERN_DEBUG "VC IHSize = 0x%0X\n",
2001 cmd.buffer.block_data[5]);
2002 printk(KERN_DEBUG "VC Xlim Hi = 0x%0X\n",
2003 cmd.buffer.block_data[6]);
2004 printk(KERN_DEBUG "VC XLim Lo = 0x%0X\n",
2005 cmd.buffer.block_data[7]);
2006 printk(KERN_DEBUG "VC YLim Hi = 0x%0X\n",
2007 cmd.buffer.block_data[8]);
2008 printk(KERN_DEBUG "VC YLim Lo = 0x%0X\n",
2009 cmd.buffer.block_data[9]);
2010 printk(KERN_DEBUG "VC OHSize = 0x%0X\n",
2011 cmd.buffer.block_data[10]);
2012 printk(KERN_DEBUG "VC OVSize = 0x%0X\n",
2013 cmd.buffer.block_data[11]);
2014 printk(KERN_DEBUG "VC HCrop = 0x%0X\n",
2015 cmd.buffer.block_data[12]);
2016 printk(KERN_DEBUG "VC VCrop = 0x%0X\n",
2017 cmd.buffer.block_data[13]);
2018 printk(KERN_DEBUG "VC HPhase = 0x%0X\n",
2019 cmd.buffer.block_data[14]);
2020 printk(KERN_DEBUG "VC VPhase = 0x%0X\n",
2021 cmd.buffer.block_data[15]);
2022 printk(KERN_DEBUG "VC HIspan = 0x%0X\n",
2023 cmd.buffer.block_data[16]);
2024 printk(KERN_DEBUG "VC VIspan = 0x%0X\n",
2025 cmd.buffer.block_data[17]);
2026 printk(KERN_DEBUG "VC HiCrop = 0x%0X\n",
2027 cmd.buffer.block_data[18]);
2028 printk(KERN_DEBUG "VC ViCrop = 0x%0X\n",
2029 cmd.buffer.block_data[19]);
2030 printk(KERN_DEBUG "VC HiFract = 0x%0X\n",
2031 cmd.buffer.block_data[20]);
2032 printk(KERN_DEBUG "VC ViFract = 0x%0X\n",
2033 cmd.buffer.block_data[21]);
2034 printk(KERN_DEBUG "VC JPeg Opt = 0x%0X\n",
2035 cmd.buffer.block_data[22]);
2036 printk(KERN_DEBUG "VC Creep Per = 0x%0X\n",
2037 cmd.buffer.block_data[23]);
2038 printk(KERN_DEBUG "VC User Sq. = 0x%0X\n",
2039 cmd.buffer.block_data[24]);
2040 printk(KERN_DEBUG "VC Target KB = 0x%0X\n",
2041 cmd.buffer.block_data[25]);
2042
2043 /*** VP ***/
2044 cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
2045 cmd.reg_count = 14;
2046 cmd.start = 0;
2047 cpia2_send_command(cam, &cmd);
2048
2049 printk(KERN_DEBUG "VP Dev Hi = 0x%0X\n",
2050 cmd.buffer.block_data[0]);
2051 printk(KERN_DEBUG "VP Dev Lo = 0x%0X\n",
2052 cmd.buffer.block_data[1]);
2053 printk(KERN_DEBUG "VP Sys State = 0x%0X\n",
2054 cmd.buffer.block_data[2]);
2055 printk(KERN_DEBUG "VP Sys Ctrl = 0x%0X\n",
2056 cmd.buffer.block_data[3]);
2057 printk(KERN_DEBUG "VP Sensor flg = 0x%0X\n",
2058 cmd.buffer.block_data[5]);
2059 printk(KERN_DEBUG "VP Sensor Rev = 0x%0X\n",
2060 cmd.buffer.block_data[6]);
2061 printk(KERN_DEBUG "VP Dev Config = 0x%0X\n",
2062 cmd.buffer.block_data[7]);
2063 printk(KERN_DEBUG "VP GPIO_DIR = 0x%0X\n",
2064 cmd.buffer.block_data[8]);
2065 printk(KERN_DEBUG "VP GPIO_DATA = 0x%0X\n",
2066 cmd.buffer.block_data[9]);
2067 printk(KERN_DEBUG "VP Ram ADDR H = 0x%0X\n",
2068 cmd.buffer.block_data[10]);
2069 printk(KERN_DEBUG "VP Ram ADDR L = 0x%0X\n",
2070 cmd.buffer.block_data[11]);
2071 printk(KERN_DEBUG "VP RAM Data = 0x%0X\n",
2072 cmd.buffer.block_data[12]);
2073 printk(KERN_DEBUG "Do Call = 0x%0X\n",
2074 cmd.buffer.block_data[13]);
2075
2076 if (cam->params.pnp_id.device_type == DEVICE_STV_672) {
2077 cmd.reg_count = 9;
2078 cmd.start = 0x0E;
2079 cpia2_send_command(cam, &cmd);
2080 printk(KERN_DEBUG "VP Clock Ctrl = 0x%0X\n",
2081 cmd.buffer.block_data[0]);
2082 printk(KERN_DEBUG "VP Patch Rev = 0x%0X\n",
2083 cmd.buffer.block_data[1]);
2084 printk(KERN_DEBUG "VP Vid Mode = 0x%0X\n",
2085 cmd.buffer.block_data[2]);
2086 printk(KERN_DEBUG "VP Framerate = 0x%0X\n",
2087 cmd.buffer.block_data[3]);
2088 printk(KERN_DEBUG "VP UserEffect = 0x%0X\n",
2089 cmd.buffer.block_data[4]);
2090 printk(KERN_DEBUG "VP White Bal = 0x%0X\n",
2091 cmd.buffer.block_data[5]);
2092 printk(KERN_DEBUG "VP WB thresh = 0x%0X\n",
2093 cmd.buffer.block_data[6]);
2094 printk(KERN_DEBUG "VP Exp Modes = 0x%0X\n",
2095 cmd.buffer.block_data[7]);
2096 printk(KERN_DEBUG "VP Exp Target = 0x%0X\n",
2097 cmd.buffer.block_data[8]);
2098
2099 cmd.reg_count = 1;
2100 cmd.start = 0x1B;
2101 cpia2_send_command(cam, &cmd);
2102 printk(KERN_DEBUG "VP FlickerMds = 0x%0X\n",
2103 cmd.buffer.block_data[0]);
2104 } else {
2105		cmd.reg_count = 8;
2106 cmd.start = 0x0E;
2107 cpia2_send_command(cam, &cmd);
2108 printk(KERN_DEBUG "VP Clock Ctrl = 0x%0X\n",
2109 cmd.buffer.block_data[0]);
2110 printk(KERN_DEBUG "VP Patch Rev = 0x%0X\n",
2111 cmd.buffer.block_data[1]);
2112 printk(KERN_DEBUG "VP Vid Mode = 0x%0X\n",
2113 cmd.buffer.block_data[5]);
2114 printk(KERN_DEBUG "VP Framerate = 0x%0X\n",
2115 cmd.buffer.block_data[6]);
2116 printk(KERN_DEBUG "VP UserEffect = 0x%0X\n",
2117 cmd.buffer.block_data[7]);
2118
2119 cmd.reg_count = 1;
2120 cmd.start = CPIA2_VP5_EXPOSURE_TARGET;
2121 cpia2_send_command(cam, &cmd);
2122 printk(KERN_DEBUG "VP5 Exp Target= 0x%0X\n",
2123 cmd.buffer.block_data[0]);
2124
2125 cmd.reg_count = 4;
2126 cmd.start = 0x3A;
2127 cpia2_send_command(cam, &cmd);
2128 printk(KERN_DEBUG "VP5 MY Black = 0x%0X\n",
2129 cmd.buffer.block_data[0]);
2130 printk(KERN_DEBUG "VP5 MCY Range = 0x%0X\n",
2131 cmd.buffer.block_data[1]);
2132 printk(KERN_DEBUG "VP5 MYCEILING = 0x%0X\n",
2133 cmd.buffer.block_data[2]);
2134 printk(KERN_DEBUG "VP5 MCUV Sat = 0x%0X\n",
2135 cmd.buffer.block_data[3]);
2136 }
2137#endif
2138}
2139
2140/******************************************************************************
2141 *
2142 * reset_camera_struct
2143 *
2144 * Sets all values to the defaults
2145 *****************************************************************************/
2146void reset_camera_struct(struct camera_data *cam)
2147{
2148 /***
2149 * The following parameter values are the defaults from the register map.
2150 ***/
2151 cam->params.color_params.brightness = DEFAULT_BRIGHTNESS;
2152 cam->params.color_params.contrast = DEFAULT_CONTRAST;
2153 cam->params.color_params.saturation = DEFAULT_SATURATION;
2154 cam->params.vp_params.lowlight_boost = 0;
2155
2156 /* FlickerModes */
2157 cam->params.flicker_control.flicker_mode_req = NEVER_FLICKER;
2158 cam->params.flicker_control.mains_frequency = 60;
2159
2160 /* jpeg params */
2161 cam->params.compression.jpeg_options = CPIA2_VC_VC_JPEG_OPT_DEFAULT;
2162 cam->params.compression.creep_period = 2;
2163 cam->params.compression.user_squeeze = 20;
2164 cam->params.compression.inhibit_htables = false;
2165
2166 /* gpio params */
2167 cam->params.vp_params.gpio_direction = 0; /* write, the default safe mode */
2168 cam->params.vp_params.gpio_data = 0;
2169
2170 /* Target kb params */
2171 cam->params.vc_params.target_kb = DEFAULT_TARGET_KB;
2172
2173 /***
2174 * Set Sensor FPS as fast as possible.
2175 ***/
2176 if(cam->params.pnp_id.device_type == DEVICE_STV_672) {
2177 if(cam->params.version.sensor_flags == CPIA2_VP_SENSOR_FLAGS_500)
2178 cam->params.vp_params.frame_rate = CPIA2_VP_FRAMERATE_15;
2179 else
2180 cam->params.vp_params.frame_rate = CPIA2_VP_FRAMERATE_30;
2181 } else {
2182 cam->params.vp_params.frame_rate = CPIA2_VP_FRAMERATE_30;
2183 }
2184
2185 /***
 2186	 * Set the default video mode as large as possible:
 2187	 * VGA for the VGA sensor, CIF for the CIF sensor.
2188 ***/
2189 if (cam->params.version.sensor_flags == CPIA2_VP_SENSOR_FLAGS_500) {
2190 cam->sensor_type = CPIA2_SENSOR_500;
2191 cam->video_size = VIDEOSIZE_VGA;
2192 cam->params.roi.width = STV_IMAGE_VGA_COLS;
2193 cam->params.roi.height = STV_IMAGE_VGA_ROWS;
2194 } else {
2195 cam->sensor_type = CPIA2_SENSOR_410;
2196 cam->video_size = VIDEOSIZE_CIF;
2197 cam->params.roi.width = STV_IMAGE_CIF_COLS;
2198 cam->params.roi.height = STV_IMAGE_CIF_ROWS;
2199 }
2200
2201 /***
 2202	 * Fill in the v4l structures. video_cap is filled in inside the VIDIOCGCAP
 2203	 * ioctl. Here, just do the window and picture structs.
2204 ***/
2205 cam->vp.palette = (u16) VIDEO_PALETTE_RGB24; /* Is this right? */
2206 cam->vp.brightness = (u16) cam->params.color_params.brightness * 256;
2207 cam->vp.colour = (u16) cam->params.color_params.saturation * 256;
2208 cam->vp.contrast = (u16) cam->params.color_params.contrast * 256;
2209
2210 cam->vw.x = 0;
2211 cam->vw.y = 0;
2212 cam->vw.width = cam->params.roi.width;
2213 cam->vw.height = cam->params.roi.height;
2214 cam->vw.flags = 0;
2215 cam->vw.clipcount = 0;
2216
2217 return;
2218}
2219
2220/******************************************************************************
2221 *
2222 * cpia2_init_camera_struct
2223 *
2224 * Initializes the camera struct; it does not call reset_camera_struct() to fill in defaults.
2225 *****************************************************************************/
2226struct camera_data *cpia2_init_camera_struct(void)
2227{
2228 struct camera_data *cam;
2229
2230 cam = kmalloc(sizeof(*cam), GFP_KERNEL);
2231
2232 if (!cam) {
2233 ERR("couldn't kmalloc cpia2 struct\n");
2234 return NULL;
2235 }
2236
2237 /* Default everything to 0 */
2238 memset(cam, 0, sizeof(struct camera_data));
2239
2240 cam->present = 1;
2241 init_MUTEX(&cam->busy_lock);
2242 init_waitqueue_head(&cam->wq_stream);
2243
2244 return cam;
2245}
2246
2247/******************************************************************************
2248 *
2249 * cpia2_init_camera
2250 *
2251 * Initializes camera.
2252 *****************************************************************************/
2253int cpia2_init_camera(struct camera_data *cam)
2254{
2255 DBG("Start\n");
2256
2257 cam->mmapped = false;
2258
2259 /* Get sensor and asic types before reset. */
2260 cpia2_set_high_power(cam);
2261 cpia2_get_version_info(cam);
2262 if (cam->params.version.asic_id != CPIA2_ASIC_672) {
2263 ERR("Device IO error (asicID has incorrect value of 0x%X\n",
2264 cam->params.version.asic_id);
2265 return -ENODEV;
2266 }
2267
2268 /* Set GPIO direction and data to a safe state. */
2269 cpia2_do_command(cam, CPIA2_CMD_SET_VC_MP_GPIO_DIRECTION,
2270 TRANSFER_WRITE, 0);
2271 cpia2_do_command(cam, CPIA2_CMD_SET_VC_MP_GPIO_DATA,
2272 TRANSFER_WRITE, 0);
2273
2274 /* resetting struct requires version info for sensor and asic types */
2275 reset_camera_struct(cam);
2276
2277 cpia2_set_low_power(cam);
2278
2279 DBG("End\n");
2280
2281 return 0;
2282}
2283
2284/******************************************************************************
2285 *
2286 * cpia2_allocate_buffers
2287 *
2288 *****************************************************************************/
2289int cpia2_allocate_buffers(struct camera_data *cam)
2290{
2291 int i;
2292
2293 if(!cam->buffers) {
2294 u32 size = cam->num_frames*sizeof(struct framebuf);
2295 cam->buffers = kmalloc(size, GFP_KERNEL);
2296 if(!cam->buffers) {
2297 ERR("couldn't kmalloc frame buffer structures\n");
2298 return -ENOMEM;
2299 }
2300 }
2301
2302 if(!cam->frame_buffer) {
2303 cam->frame_buffer = rvmalloc(cam->frame_size*cam->num_frames);
2304 if (!cam->frame_buffer) {
2305 ERR("couldn't vmalloc frame buffer data area\n");
2306 kfree(cam->buffers);
2307 cam->buffers = NULL;
2308 return -ENOMEM;
2309 }
2310 }
2311
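	/* Link the frame buffers into a circular list; the last buffer, set up
	 * after the loop, wraps back to the first. */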
2312 for(i=0; i<cam->num_frames-1; ++i) {
2313 cam->buffers[i].next = &cam->buffers[i+1];
2314 cam->buffers[i].data = cam->frame_buffer +i*cam->frame_size;
2315 cam->buffers[i].status = FRAME_EMPTY;
2316 cam->buffers[i].length = 0;
2317 cam->buffers[i].max_length = 0;
2318 cam->buffers[i].num = i;
2319 }
2320 cam->buffers[i].next = cam->buffers;
2321 cam->buffers[i].data = cam->frame_buffer +i*cam->frame_size;
2322 cam->buffers[i].status = FRAME_EMPTY;
2323 cam->buffers[i].length = 0;
2324 cam->buffers[i].max_length = 0;
2325 cam->buffers[i].num = i;
2326 cam->curbuff = cam->buffers;
2327 cam->workbuff = cam->curbuff->next;
2328 DBG("buffers=%p, curbuff=%p, workbuff=%p\n", cam->buffers, cam->curbuff,
2329 cam->workbuff);
2330 return 0;
2331}
2332
2333/******************************************************************************
2334 *
2335 * cpia2_free_buffers
2336 *
2337 *****************************************************************************/
2338void cpia2_free_buffers(struct camera_data *cam)
2339{
2340 if(cam->buffers) {
2341 kfree(cam->buffers);
2342 cam->buffers = NULL;
2343 }
2344 if(cam->frame_buffer) {
2345 rvfree(cam->frame_buffer, cam->frame_size*cam->num_frames);
2346 cam->frame_buffer = NULL;
2347 }
2348}
2349
2350/******************************************************************************
2351 *
2352 * cpia2_read
2353 *
2354 *****************************************************************************/
2355long cpia2_read(struct camera_data *cam,
2356 char __user *buf, unsigned long count, int noblock)
2357{
2358 struct framebuf *frame;
2359 if (!count) {
2360 return 0;
2361 }
2362
2363 if (!buf) {
2364 ERR("%s: buffer NULL\n",__FUNCTION__);
2365 return -EINVAL;
2366 }
2367
2368 if (!cam) {
2369 ERR("%s: Internal error, camera_data NULL!\n",__FUNCTION__);
2370 return -EINVAL;
2371 }
2372
2373 /* make this _really_ smp and multithread-safe */
2374 if (down_interruptible(&cam->busy_lock))
2375 return -ERESTARTSYS;
2376
2377 if (!cam->present) {
2378 LOG("%s: camera removed\n",__FUNCTION__);
2379 up(&cam->busy_lock);
2380 return 0; /* EOF */
2381 }
2382
2383 if(!cam->streaming) {
2384 /* Start streaming */
2385 cpia2_usb_stream_start(cam,
2386 cam->params.camera_state.stream_mode);
2387 }
2388
2389 /* Copy cam->curbuff in case it changes while we're processing */
2390 frame = cam->curbuff;
2391 if (noblock && frame->status != FRAME_READY) {
2392 up(&cam->busy_lock);
2393 return -EAGAIN;
2394 }
2395
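	/* Otherwise block until the current buffer becomes ready or the camera
	 * is removed. */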
2396 if(frame->status != FRAME_READY) {
2397 up(&cam->busy_lock);
2398 wait_event_interruptible(cam->wq_stream,
2399 !cam->present ||
2400 (frame = cam->curbuff)->status == FRAME_READY);
2401 if (signal_pending(current))
2402 return -ERESTARTSYS;
2403 /* make this _really_ smp and multithread-safe */
2404 if (down_interruptible(&cam->busy_lock)) {
2405 return -ERESTARTSYS;
2406 }
2407 if(!cam->present) {
2408 up(&cam->busy_lock);
2409 return 0;
2410 }
2411 }
2412
2413 /* copy data to user space */
2414 if (frame->length > count) {
2415 up(&cam->busy_lock);
2416 return -EFAULT;
2417 }
2418 if (copy_to_user(buf, frame->data, frame->length)) {
2419 up(&cam->busy_lock);
2420 return -EFAULT;
2421 }
2422
2423 count = frame->length;
2424
2425 frame->status = FRAME_EMPTY;
2426
2427 up(&cam->busy_lock);
2428 return count;
2429}
2430
2431/******************************************************************************
2432 *
2433 * cpia2_poll
2434 *
2435 *****************************************************************************/
2436unsigned int cpia2_poll(struct camera_data *cam, struct file *filp,
2437 poll_table *wait)
2438{
2439 unsigned int status=0;
2440
2441 if(!cam) {
2442 ERR("%s: Internal error, camera_data not found!\n",__FUNCTION__);
2443 return POLLERR;
2444 }
2445
2446 down(&cam->busy_lock);
2447
2448 if(!cam->present) {
2449 up(&cam->busy_lock);
2450 return POLLHUP;
2451 }
2452
2453 if(!cam->streaming) {
2454 /* Start streaming */
2455 cpia2_usb_stream_start(cam,
2456 cam->params.camera_state.stream_mode);
2457 }
2458
2459 up(&cam->busy_lock);
2460 poll_wait(filp, &cam->wq_stream, wait);
2461 down(&cam->busy_lock);
2462
2463 if(!cam->present)
2464 status = POLLHUP;
2465 else if(cam->curbuff->status == FRAME_READY)
2466 status = POLLIN | POLLRDNORM;
2467
2468 up(&cam->busy_lock);
2469 return status;
2470}
2471
2472/******************************************************************************
2473 *
2474 * cpia2_remap_buffer
2475 *
2476 *****************************************************************************/
2477int cpia2_remap_buffer(struct camera_data *cam, struct vm_area_struct *vma)
2478{
2479 const char *adr = (const char *)vma->vm_start;
2480 unsigned long size = vma->vm_end-vma->vm_start;
2481 unsigned long start_offset = vma->vm_pgoff << PAGE_SHIFT;
2482 unsigned long start = (unsigned long) adr;
2483 unsigned long page, pos;
2484
2485 if (!cam)
2486 return -ENODEV;
2487
2488 DBG("mmap offset:%ld size:%ld\n", start_offset, size);
2489
2490 /* make this _really_ smp-safe */
2491 if (down_interruptible(&cam->busy_lock))
2492 return -ERESTARTSYS;
2493
2494 if (!cam->present) {
2495 up(&cam->busy_lock);
2496 return -ENODEV;
2497 }
2498
2499 if (size > cam->frame_size*cam->num_frames ||
2500 (start_offset % cam->frame_size) != 0 ||
2501 (start_offset+size > cam->frame_size*cam->num_frames)) {
2502 up(&cam->busy_lock);
2503 return -EINVAL;
2504 }
2505
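	/* Remap the requested frames into the caller's VMA one page at a time. */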
2506 pos = ((unsigned long) (cam->frame_buffer)) + start_offset;
2507 while (size > 0) {
2508 page = kvirt_to_pa(pos);
2509 if (remap_pfn_range(vma, start, page >> PAGE_SHIFT, PAGE_SIZE, PAGE_SHARED)) {
2510 up(&cam->busy_lock);
2511 return -EAGAIN;
2512 }
2513 start += PAGE_SIZE;
2514 pos += PAGE_SIZE;
2515 if (size > PAGE_SIZE)
2516 size -= PAGE_SIZE;
2517 else
2518 size = 0;
2519 }
2520
2521 cam->mmapped = true;
2522 up(&cam->busy_lock);
2523 return 0;
2524}
2525
diff --git a/drivers/media/video/cpia2/cpia2_registers.h b/drivers/media/video/cpia2/cpia2_registers.h
new file mode 100644
index 000000000000..3bbec514a967
--- /dev/null
+++ b/drivers/media/video/cpia2/cpia2_registers.h
@@ -0,0 +1,476 @@
1/****************************************************************************
2 *
3 * Filename: cpia2registers.h
4 *
5 * Copyright 2001, STMicroelectronics, Inc.
6 *
7 * Description:
8 * Definitions for the CPia2 register set
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 *
24 ****************************************************************************/
25
26#ifndef CPIA2_REGISTER_HEADER
27#define CPIA2_REGISTER_HEADER
28
29/***
30 * System register set (Bank 0)
31 ***/
32#define CPIA2_SYSTEM_DEVICE_HI 0x00
33#define CPIA2_SYSTEM_DEVICE_LO 0x01
34
35#define CPIA2_SYSTEM_SYSTEM_CONTROL 0x02
36#define CPIA2_SYSTEM_CONTROL_LOW_POWER 0x00
37#define CPIA2_SYSTEM_CONTROL_HIGH_POWER 0x01
38#define CPIA2_SYSTEM_CONTROL_SUSPEND 0x02
39#define CPIA2_SYSTEM_CONTROL_V2W_ERR 0x10
40#define CPIA2_SYSTEM_CONTROL_RB_ERR 0x10
41#define CPIA2_SYSTEM_CONTROL_CLEAR_ERR 0x80
42
43#define CPIA2_SYSTEM_INT_PACKET_CTRL 0x04
44#define CPIA2_SYSTEM_INT_PACKET_CTRL_ENABLE_SW_XX 0x01
45#define CPIA2_SYSTEM_INT_PACKET_CTRL_ENABLE_EOF 0x02
46#define CPIA2_SYSTEM_INT_PACKET_CTRL_ENABLE_INT1 0x04
47
48#define CPIA2_SYSTEM_CACHE_CTRL 0x05
49#define CPIA2_SYSTEM_CACHE_CTRL_CACHE_RESET 0x01
50#define CPIA2_SYSTEM_CACHE_CTRL_CACHE_FLUSH 0x02
51
52#define CPIA2_SYSTEM_SERIAL_CTRL 0x06
53#define CPIA2_SYSTEM_SERIAL_CTRL_NULL_CMD 0x00
54#define CPIA2_SYSTEM_SERIAL_CTRL_START_CMD 0x01
55#define CPIA2_SYSTEM_SERIAL_CTRL_STOP_CMD 0x02
56#define CPIA2_SYSTEM_SERIAL_CTRL_WRITE_CMD 0x03
57#define CPIA2_SYSTEM_SERIAL_CTRL_READ_ACK_CMD 0x04
58#define CPIA2_SYSTEM_SERIAL_CTRL_READ_NACK_CMD 0x05
59
60#define CPIA2_SYSTEM_SERIAL_DATA 0x07
61
62#define CPIA2_SYSTEM_VP_SERIAL_ADDR 0x08
63
64/***
65 * I2C addresses for various devices in CPiA2
66 ***/
67#define CPIA2_SYSTEM_VP_SERIAL_ADDR_SENSOR 0x20
68#define CPIA2_SYSTEM_VP_SERIAL_ADDR_VP 0x88
69#define CPIA2_SYSTEM_VP_SERIAL_ADDR_676_VP 0x8A
70
71#define CPIA2_SYSTEM_SPARE_REG1 0x09
72#define CPIA2_SYSTEM_SPARE_REG2 0x0A
73#define CPIA2_SYSTEM_SPARE_REG3 0x0B
74
75#define CPIA2_SYSTEM_MC_PORT_0 0x0C
76#define CPIA2_SYSTEM_MC_PORT_1 0x0D
77#define CPIA2_SYSTEM_MC_PORT_2 0x0E
78#define CPIA2_SYSTEM_MC_PORT_3 0x0F
79
80#define CPIA2_SYSTEM_STATUS_PKT 0x20
81#define CPIA2_SYSTEM_STATUS_PKT_END 0x27
82
83#define CPIA2_SYSTEM_DESCRIP_VID_HI 0x30
84#define CPIA2_SYSTEM_DESCRIP_VID_LO 0x31
85#define CPIA2_SYSTEM_DESCRIP_PID_HI 0x32
86#define CPIA2_SYSTEM_DESCRIP_PID_LO 0x33
87
88#define CPIA2_SYSTEM_FW_VERSION_HI 0x34
89#define CPIA2_SYSTEM_FW_VERSION_LO 0x35
90
91#define CPIA2_SYSTEM_CACHE_START_INDEX 0x80
92#define CPIA2_SYSTEM_CACHE_MAX_WRITES 0x10
93
94/***
95 * VC register set (Bank 1)
96 ***/
97#define CPIA2_VC_ASIC_ID 0x80
98
99#define CPIA2_VC_ASIC_REV 0x81
100
101#define CPIA2_VC_PW_CTRL 0x82
102#define CPIA2_VC_PW_CTRL_COLDSTART 0x01
103#define CPIA2_VC_PW_CTRL_CP_CLK_EN 0x02
104#define CPIA2_VC_PW_CTRL_VP_RESET_N 0x04
105#define CPIA2_VC_PW_CTRL_VC_CLK_EN 0x08
106#define CPIA2_VC_PW_CTRL_VC_RESET_N 0x10
107#define CPIA2_VC_PW_CTRL_GOTO_SUSPEND 0x20
108#define CPIA2_VC_PW_CTRL_UDC_SUSPEND 0x40
109#define CPIA2_VC_PW_CTRL_PWR_DOWN 0x80
110
111#define CPIA2_VC_WAKEUP 0x83
112#define CPIA2_VC_WAKEUP_SW_ENABLE 0x01
113#define CPIA2_VC_WAKEUP_XX_ENABLE 0x02
114#define CPIA2_VC_WAKEUP_SW_ATWAKEUP 0x04
115#define CPIA2_VC_WAKEUP_XX_ATWAKEUP 0x08
116
117#define CPIA2_VC_CLOCK_CTRL 0x84
118#define CPIA2_VC_CLOCK_CTRL_TESTUP72 0x01
119
120#define CPIA2_VC_INT_ENABLE 0x88
121#define CPIA2_VC_INT_ENABLE_XX_IE 0x01
122#define CPIA2_VC_INT_ENABLE_SW_IE 0x02
123#define CPIA2_VC_INT_ENABLE_VC_IE 0x04
124#define CPIA2_VC_INT_ENABLE_USBDATA_IE 0x08
125#define CPIA2_VC_INT_ENABLE_USBSETUP_IE 0x10
126#define CPIA2_VC_INT_ENABLE_USBCFG_IE 0x20
127
128#define CPIA2_VC_INT_FLAG 0x89
129#define CPIA2_VC_INT_ENABLE_XX_FLAG 0x01
130#define CPIA2_VC_INT_ENABLE_SW_FLAG 0x02
131#define CPIA2_VC_INT_ENABLE_VC_FLAG 0x04
132#define CPIA2_VC_INT_ENABLE_USBDATA_FLAG 0x08
133#define CPIA2_VC_INT_ENABLE_USBSETUP_FLAG 0x10
134#define CPIA2_VC_INT_ENABLE_USBCFG_FLAG 0x20
135#define CPIA2_VC_INT_ENABLE_SET_RESET_BIT 0x80
136
137#define CPIA2_VC_INT_STATE 0x8A
138#define CPIA2_VC_INT_STATE_XX_STATE 0x01
139#define CPIA2_VC_INT_STATE_SW_STATE 0x02
140
141#define CPIA2_VC_MP_DIR 0x90
142#define CPIA2_VC_MP_DIR_INPUT 0x00
143#define CPIA2_VC_MP_DIR_OUTPUT 0x01
144
145#define CPIA2_VC_MP_DATA 0x91
146
147#define CPIA2_VC_DP_CTRL 0x98
148#define CPIA2_VC_DP_CTRL_MODE_0 0x00
149#define CPIA2_VC_DP_CTRL_MODE_A 0x01
150#define CPIA2_VC_DP_CTRL_MODE_B 0x02
151#define CPIA2_VC_DP_CTRL_MODE_C 0x03
152#define CPIA2_VC_DP_CTRL_FAKE_FST 0x04
153
154#define CPIA2_VC_AD_CTRL 0x99
155#define CPIA2_VC_AD_CTRL_SRC_0 0x00
156#define CPIA2_VC_AD_CTRL_SRC_DIGI_A 0x01
157#define CPIA2_VC_AD_CTRL_SRC_REG 0x02
158#define CPIA2_VC_AD_CTRL_DST_USB 0x00
159#define CPIA2_VC_AD_CTRL_DST_REG 0x04
160
161#define CPIA2_VC_AD_TEST_IN 0x9B
162
163#define CPIA2_VC_AD_TEST_OUT 0x9C
164
165#define CPIA2_VC_AD_STATUS 0x9D
166#define CPIA2_VC_AD_STATUS_EMPTY 0x01
167#define CPIA2_VC_AD_STATUS_FULL 0x02
168
169#define CPIA2_VC_DP_DATA 0x9E
170
171#define CPIA2_VC_ST_CTRL 0xA0
172#define CPIA2_VC_ST_CTRL_SRC_VC 0x00
173#define CPIA2_VC_ST_CTRL_SRC_DP 0x01
174#define CPIA2_VC_ST_CTRL_SRC_REG 0x02
175
176#define CPIA2_VC_ST_CTRL_RAW_SELECT 0x04
177
178#define CPIA2_VC_ST_CTRL_DST_USB 0x00
179#define CPIA2_VC_ST_CTRL_DST_DP 0x08
180#define CPIA2_VC_ST_CTRL_DST_REG 0x10
181
182#define CPIA2_VC_ST_CTRL_FIFO_ENABLE 0x20
183#define CPIA2_VC_ST_CTRL_EOF_DETECT 0x40
184
185#define CPIA2_VC_ST_TEST 0xA1
186#define CPIA2_VC_ST_TEST_MODE_MANUAL 0x00
187#define CPIA2_VC_ST_TEST_MODE_INCREMENT 0x02
188
189#define CPIA2_VC_ST_TEST_AUTO_FILL 0x08
190
191#define CPIA2_VC_ST_TEST_REPEAT_FIFO 0x10
192
193#define CPIA2_VC_ST_TEST_IN 0xA2
194
195#define CPIA2_VC_ST_TEST_OUT 0xA3
196
197#define CPIA2_VC_ST_STATUS 0xA4
198#define CPIA2_VC_ST_STATUS_EMPTY 0x01
199#define CPIA2_VC_ST_STATUS_FULL 0x02
200
201#define CPIA2_VC_ST_FRAME_DETECT_1 0xA5
202
203#define CPIA2_VC_ST_FRAME_DETECT_2 0xA6
204
205#define CPIA2_VC_USB_CTRL 0xA8
206#define CPIA2_VC_USB_CTRL_CMD_STALLED 0x01
207#define CPIA2_VC_USB_CTRL_CMD_READY 0x02
208#define CPIA2_VC_USB_CTRL_CMD_STATUS 0x04
209#define CPIA2_VC_USB_CTRL_CMD_STATUS_DIR 0x08
210#define CPIA2_VC_USB_CTRL_CMD_NO_CLASH 0x10
211#define CPIA2_VC_USB_CTRL_CMD_MICRO_ACCESS 0x80
212
213#define CPIA2_VC_USB_STRM 0xA9
214#define CPIA2_VC_USB_STRM_ISO_ENABLE 0x01
215#define CPIA2_VC_USB_STRM_BLK_ENABLE 0x02
216#define CPIA2_VC_USB_STRM_INT_ENABLE 0x04
217#define CPIA2_VC_USB_STRM_AUD_ENABLE 0x08
218
219#define CPIA2_VC_USB_STATUS 0xAA
220#define CPIA2_VC_USB_STATUS_CMD_IN_PROGRESS 0x01
221#define CPIA2_VC_USB_STATUS_CMD_STATUS_STALL 0x02
222#define CPIA2_VC_USB_STATUS_CMD_HANDSHAKE 0x04
223#define CPIA2_VC_USB_STATUS_CMD_OVERRIDE 0x08
224#define CPIA2_VC_USB_STATUS_CMD_FIFO_BUSY 0x10
225#define CPIA2_VC_USB_STATUS_BULK_REPEAT_TXN 0x20
226#define CPIA2_VC_USB_STATUS_CONFIG_DONE 0x40
227#define CPIA2_VC_USB_STATUS_USB_SUSPEND 0x80
228
229#define CPIA2_VC_USB_CMDW 0xAB
230
231#define CPIA2_VC_USB_DATARW 0xAC
232
233#define CPIA2_VC_USB_INFO 0xAD
234
235#define CPIA2_VC_USB_CONFIG 0xAE
236
237#define CPIA2_VC_USB_SETTINGS 0xAF
238#define CPIA2_VC_USB_SETTINGS_CONFIG_MASK 0x03
239#define CPIA2_VC_USB_SETTINGS_INTERFACE_MASK 0x0C
240#define CPIA2_VC_USB_SETTINGS_ALTERNATE_MASK 0x70
241
242#define CPIA2_VC_USB_ISOLIM 0xB0
243
244#define CPIA2_VC_USB_ISOFAILS 0xB1
245
246#define CPIA2_VC_USB_ISOMAXPKTHI 0xB2
247
248#define CPIA2_VC_USB_ISOMAXPKTLO 0xB3
249
250#define CPIA2_VC_V2W_CTRL 0xB8
251#define CPIA2_VC_V2W_SELECT 0x01
252
253#define CPIA2_VC_V2W_SCL 0xB9
254
255#define CPIA2_VC_V2W_SDA 0xBA
256
257#define CPIA2_VC_VC_CTRL 0xC0
258#define CPIA2_VC_VC_CTRL_RUN 0x01
259#define CPIA2_VC_VC_CTRL_SINGLESHOT 0x02
260#define CPIA2_VC_VC_CTRL_IDLING 0x04
261#define CPIA2_VC_VC_CTRL_INHIBIT_H_TABLES 0x10
262#define CPIA2_VC_VC_CTRL_INHIBIT_Q_TABLES 0x20
263#define CPIA2_VC_VC_CTRL_INHIBIT_PRIVATE 0x40
264
265#define CPIA2_VC_VC_RESTART_IVAL_HI 0xC1
266
267#define CPIA2_VC_VC_RESTART_IVAL_LO 0xC2
268
269#define CPIA2_VC_VC_FORMAT 0xC3
270#define CPIA2_VC_VC_FORMAT_UFIRST 0x01
271#define CPIA2_VC_VC_FORMAT_MONO 0x02
272#define CPIA2_VC_VC_FORMAT_DECIMATING 0x04
273#define CPIA2_VC_VC_FORMAT_SHORTLINE 0x08
274#define CPIA2_VC_VC_FORMAT_SELFTEST 0x10
275
276#define CPIA2_VC_VC_CLOCKS 0xC4
277#define CPIA2_VC_VC_CLOCKS_CLKDIV_MASK 0x03
278#define CPIA2_VC_VC_672_CLOCKS_CIF_DIV_BY_3 0x04
279#define CPIA2_VC_VC_672_CLOCKS_SCALING 0x08
280#define CPIA2_VC_VC_CLOCKS_LOGDIV0 0x00
281#define CPIA2_VC_VC_CLOCKS_LOGDIV1 0x01
282#define CPIA2_VC_VC_CLOCKS_LOGDIV2 0x02
283#define CPIA2_VC_VC_CLOCKS_LOGDIV3 0x03
284#define CPIA2_VC_VC_676_CLOCKS_CIF_DIV_BY_3 0x08
285#define CPIA2_VC_VC_676_CLOCKS_SCALING 0x10
286
287#define CPIA2_VC_VC_IHSIZE_LO 0xC5
288
289#define CPIA2_VC_VC_XLIM_HI 0xC6
290
291#define CPIA2_VC_VC_XLIM_LO 0xC7
292
293#define CPIA2_VC_VC_YLIM_HI 0xC8
294
295#define CPIA2_VC_VC_YLIM_LO 0xC9
296
297#define CPIA2_VC_VC_OHSIZE 0xCA
298
299#define CPIA2_VC_VC_OVSIZE 0xCB
300
301#define CPIA2_VC_VC_HCROP 0xCC
302
303#define CPIA2_VC_VC_VCROP 0xCD
304
305#define CPIA2_VC_VC_HPHASE 0xCE
306
307#define CPIA2_VC_VC_VPHASE 0xCF
308
309#define CPIA2_VC_VC_HISPAN 0xD0
310
311#define CPIA2_VC_VC_VISPAN 0xD1
312
313#define CPIA2_VC_VC_HICROP 0xD2
314
315#define CPIA2_VC_VC_VICROP 0xD3
316
317#define CPIA2_VC_VC_HFRACT 0xD4
318#define CPIA2_VC_VC_HFRACT_DEN_MASK 0x0F
319#define CPIA2_VC_VC_HFRACT_NUM_MASK 0xF0
320
321#define CPIA2_VC_VC_VFRACT 0xD5
322#define CPIA2_VC_VC_VFRACT_DEN_MASK 0x0F
323#define CPIA2_VC_VC_VFRACT_NUM_MASK 0xF0
324
325#define CPIA2_VC_VC_JPEG_OPT 0xD6
326#define CPIA2_VC_VC_JPEG_OPT_DOUBLE_SQUEEZE 0x01
327#define CPIA2_VC_VC_JPEG_OPT_NO_DC_AUTO_SQUEEZE 0x02
328#define CPIA2_VC_VC_JPEG_OPT_AUTO_SQUEEZE 0x04
329#define CPIA2_VC_VC_JPEG_OPT_DEFAULT (CPIA2_VC_VC_JPEG_OPT_DOUBLE_SQUEEZE|\
330 CPIA2_VC_VC_JPEG_OPT_AUTO_SQUEEZE)
331
332
333#define CPIA2_VC_VC_CREEP_PERIOD 0xD7
334#define CPIA2_VC_VC_USER_SQUEEZE 0xD8
335#define CPIA2_VC_VC_TARGET_KB 0xD9
336
337#define CPIA2_VC_VC_AUTO_SQUEEZE 0xE6
338
339
340/***
341 * VP register set (Bank 2)
342 ***/
343#define CPIA2_VP_DEVICEH 0
344#define CPIA2_VP_DEVICEL 1
345
346#define CPIA2_VP_SYSTEMSTATE 0x02
347#define CPIA2_VP_SYSTEMSTATE_HK_ALIVE 0x01
348
349#define CPIA2_VP_SYSTEMCTRL 0x03
350#define CPIA2_VP_SYSTEMCTRL_REQ_CLEAR_ERROR 0x80
351#define CPIA2_VP_SYSTEMCTRL_POWER_DOWN_PLL 0x20
352#define CPIA2_VP_SYSTEMCTRL_REQ_SUSPEND_STATE 0x10
353#define CPIA2_VP_SYSTEMCTRL_REQ_SERIAL_WAKEUP 0x08
354#define CPIA2_VP_SYSTEMCTRL_REQ_AUTOLOAD 0x04
355#define CPIA2_VP_SYSTEMCTRL_HK_CONTROL 0x02
356#define CPIA2_VP_SYSTEMCTRL_POWER_CONTROL 0x01
357
358#define CPIA2_VP_SENSOR_FLAGS 0x05
359#define CPIA2_VP_SENSOR_FLAGS_404 0x01
360#define CPIA2_VP_SENSOR_FLAGS_407 0x02
361#define CPIA2_VP_SENSOR_FLAGS_409 0x04
362#define CPIA2_VP_SENSOR_FLAGS_410 0x08
363#define CPIA2_VP_SENSOR_FLAGS_500 0x10
364
365#define CPIA2_VP_SENSOR_REV 0x06
366
367#define CPIA2_VP_DEVICE_CONFIG 0x07
368#define CPIA2_VP_DEVICE_CONFIG_SERIAL_BRIDGE 0x01
369
370#define CPIA2_VP_GPIO_DIRECTION 0x08
371#define CPIA2_VP_GPIO_READ 0xFF
372#define CPIA2_VP_GPIO_WRITE 0x00
373
374#define CPIA2_VP_GPIO_DATA 0x09
375
376#define CPIA2_VP_RAM_ADDR_H 0x0A
377#define CPIA2_VP_RAM_ADDR_L 0x0B
378#define CPIA2_VP_RAM_DATA 0x0C
379
380#define CPIA2_VP_PATCH_REV 0x0F
381
382#define CPIA2_VP4_USER_MODE 0x10
383#define CPIA2_VP5_USER_MODE 0x13
384#define CPIA2_VP_USER_MODE_CIF 0x01
385#define CPIA2_VP_USER_MODE_QCIFDS 0x02
386#define CPIA2_VP_USER_MODE_QCIFPTC 0x04
387#define CPIA2_VP_USER_MODE_QVGADS 0x08
388#define CPIA2_VP_USER_MODE_QVGAPTC 0x10
389#define CPIA2_VP_USER_MODE_VGA 0x20
390
391#define CPIA2_VP4_FRAMERATE_REQUEST 0x11
392#define CPIA2_VP5_FRAMERATE_REQUEST 0x14
393#define CPIA2_VP_FRAMERATE_60 0x80
394#define CPIA2_VP_FRAMERATE_50 0x40
395#define CPIA2_VP_FRAMERATE_30 0x20
396#define CPIA2_VP_FRAMERATE_25 0x10
397#define CPIA2_VP_FRAMERATE_15 0x08
398#define CPIA2_VP_FRAMERATE_12_5 0x04
399#define CPIA2_VP_FRAMERATE_7_5 0x02
400#define CPIA2_VP_FRAMERATE_6_25 0x01
401
402#define CPIA2_VP4_USER_EFFECTS 0x12
403#define CPIA2_VP5_USER_EFFECTS 0x15
404#define CPIA2_VP_USER_EFFECTS_COLBARS 0x01
405#define CPIA2_VP_USER_EFFECTS_COLBARS_GRAD 0x02
406#define CPIA2_VP_USER_EFFECTS_MIRROR 0x04
407#define CPIA2_VP_USER_EFFECTS_FLIP 0x40 // VP5 only
408
409/* NOTE: CPIA2_VP_EXPOSURE_MODES shares the same register as VP5 User
410 * Effects */
411#define CPIA2_VP_EXPOSURE_MODES 0x15
412#define CPIA2_VP_EXPOSURE_MODES_INHIBIT_FLICKER 0x20
413#define CPIA2_VP_EXPOSURE_MODES_COMPILE_EXP 0x10
414
415#define CPIA2_VP4_EXPOSURE_TARGET 0x16 // VP4
416#define CPIA2_VP5_EXPOSURE_TARGET 0x20 // VP5
417
418#define CPIA2_VP_FLICKER_MODES 0x1B
419#define CPIA2_VP_FLICKER_MODES_50HZ 0x80
420#define CPIA2_VP_FLICKER_MODES_CUSTOM_FLT_FFREQ 0x40
421#define CPIA2_VP_FLICKER_MODES_NEVER_FLICKER 0x20
422#define CPIA2_VP_FLICKER_MODES_INHIBIT_RUB 0x10
423#define CPIA2_VP_FLICKER_MODES_ADJUST_LINE_FREQ 0x08
424#define CPIA2_VP_FLICKER_MODES_CUSTOM_INT_FFREQ 0x04
425
426#define CPIA2_VP_UMISC 0x1D
427#define CPIA2_VP_UMISC_FORCE_MONO 0x80
428#define CPIA2_VP_UMISC_FORCE_ID_MASK 0x40
429#define CPIA2_VP_UMISC_INHIBIT_AUTO_FGS 0x20
430#define CPIA2_VP_UMISC_INHIBIT_AUTO_DIMS 0x08
431#define CPIA2_VP_UMISC_OPT_FOR_SENSOR_DS 0x04
432#define CPIA2_VP_UMISC_INHIBIT_AUTO_MODE_INT 0x02
433
434#define CPIA2_VP5_ANTIFLKRSETUP 0x22 //34
435
436#define CPIA2_VP_INTERPOLATION 0x24
437#define CPIA2_VP_INTERPOLATION_EVEN_FIRST 0x40
438#define CPIA2_VP_INTERPOLATION_HJOG 0x20
439#define CPIA2_VP_INTERPOLATION_VJOG 0x10
440
441#define CPIA2_VP_GAMMA 0x25
442#define CPIA2_VP_DEFAULT_GAMMA 0x10
443
444#define CPIA2_VP_YRANGE 0x26
445
446#define CPIA2_VP_SATURATION 0x27
447
448#define CPIA2_VP5_MYBLACK_LEVEL 0x3A //58
449#define CPIA2_VP5_MCYRANGE 0x3B //59
450#define CPIA2_VP5_MYCEILING 0x3C //60
451#define CPIA2_VP5_MCUVSATURATION 0x3D //61
452
453
454#define CPIA2_VP_REHASH_VALUES 0x60
455
456
457/***
458 * Common sensor registers
459 ***/
460#define CPIA2_SENSOR_DEVICE_H 0x00
461#define CPIA2_SENSOR_DEVICE_L 0x01
462
463#define CPIA2_SENSOR_DATA_FORMAT 0x16
464#define CPIA2_SENSOR_DATA_FORMAT_HMIRROR 0x08
465#define CPIA2_SENSOR_DATA_FORMAT_VMIRROR 0x10
466
467#define CPIA2_SENSOR_CR1 0x76
468#define CPIA2_SENSOR_CR1_STAND_BY 0x01
469#define CPIA2_SENSOR_CR1_DOWN_RAMP_GEN 0x02
470#define CPIA2_SENSOR_CR1_DOWN_COLUMN_ADC 0x04
471#define CPIA2_SENSOR_CR1_DOWN_CAB_REGULATOR 0x08
472#define CPIA2_SENSOR_CR1_DOWN_AUDIO_REGULATOR 0x10
473#define CPIA2_SENSOR_CR1_DOWN_VRT_AMP 0x20
474#define CPIA2_SENSOR_CR1_DOWN_BAND_GAP 0x40
475
476#endif
diff --git a/drivers/media/video/cpia2/cpia2_usb.c b/drivers/media/video/cpia2/cpia2_usb.c
new file mode 100644
index 000000000000..f4da02941493
--- /dev/null
+++ b/drivers/media/video/cpia2/cpia2_usb.c
@@ -0,0 +1,907 @@
1/****************************************************************************
2 *
3 * Filename: cpia2_usb.c
4 *
5 * Copyright 2001, STMicroelectronics, Inc.
6 * Contact: steve.miller@st.com
7 *
8 * Description:
9 * This is a USB driver for CPia2 based video cameras.
10 * The infrastructure of this driver is based on the cpia usb driver by
11 * Jochen Scharrlach and Johannes Erdfeldt.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 *
27 * Stripped of 2.4 stuff ready for main kernel submit by
28 * Alan Cox <alan@redhat.com>
29 ****************************************************************************/
30
31#include <linux/kernel.h>
32#include <linux/slab.h>
33#include <linux/usb.h>
34
35#include "cpia2.h"
36
37static int frame_sizes[] = {
38 0, // USBIF_CMDONLY
39 0, // USBIF_BULK
40 128, // USBIF_ISO_1
41 384, // USBIF_ISO_2
42 640, // USBIF_ISO_3
43 768, // USBIF_ISO_4
44 896, // USBIF_ISO_5
45 1023, // USBIF_ISO_6
46};
47
48#define FRAMES_PER_DESC 10
49#define FRAME_SIZE_PER_DESC frame_sizes[cam->cur_alt]
50
51static void process_frame(struct camera_data *cam);
52static void cpia2_usb_complete(struct urb *urb, struct pt_regs *);
53static int cpia2_usb_probe(struct usb_interface *intf,
54 const struct usb_device_id *id);
55static void cpia2_usb_disconnect(struct usb_interface *intf);
56
57static void free_sbufs(struct camera_data *cam);
58static void add_APPn(struct camera_data *cam);
59static void add_COM(struct camera_data *cam);
60static int submit_urbs(struct camera_data *cam);
61static int set_alternate(struct camera_data *cam, unsigned int alt);
62static int configure_transfer_mode(struct camera_data *cam, unsigned int alt);
63
64static struct usb_device_id cpia2_id_table[] = {
65 {USB_DEVICE(0x0553, 0x0100)},
66 {USB_DEVICE(0x0553, 0x0140)},
67 {USB_DEVICE(0x0553, 0x0151)}, /* STV0676 */
68 {} /* Terminating entry */
69};
70MODULE_DEVICE_TABLE(usb, cpia2_id_table);
71
72static struct usb_driver cpia2_driver = {
73 .name = "cpia2",
74 .probe = cpia2_usb_probe,
75 .disconnect = cpia2_usb_disconnect,
76 .id_table = cpia2_id_table
77};
78
79
80/******************************************************************************
81 *
82 * process_frame
83 *
84 *****************************************************************************/
85static void process_frame(struct camera_data *cam)
86{
87 static int frame_count = 0;
88
89 unsigned char *inbuff = cam->workbuff->data;
90
91 DBG("Processing frame #%d, current:%d\n",
92 cam->workbuff->num, cam->curbuff->num);
93
94 if(cam->workbuff->length > cam->workbuff->max_length)
95 cam->workbuff->max_length = cam->workbuff->length;
96
97 if ((inbuff[0] == 0xFF) && (inbuff[1] == 0xD8)) {
98 frame_count++;
99 } else {
100 cam->workbuff->status = FRAME_ERROR;
101 DBG("Start of frame not found\n");
102 return;
103 }
104
105 /***
106 * Now the output buffer should have a JPEG image in it.
107 ***/
108 if(!cam->first_image_seen) {
109 /* Always skip the first image after streaming
110 * starts. It is almost certainly corrupt. */
111 cam->first_image_seen = 1;
112 cam->workbuff->status = FRAME_EMPTY;
113 return;
114 }
115 if (cam->workbuff->length > 3) {
116 if(cam->mmapped &&
117 cam->workbuff->length < cam->workbuff->max_length) {
118 /* No junk in the buffers */
119 memset(cam->workbuff->data+cam->workbuff->length,
120 0, cam->workbuff->max_length-
121 cam->workbuff->length);
122 }
123 cam->workbuff->max_length = cam->workbuff->length;
124 cam->workbuff->status = FRAME_READY;
125
126 if(!cam->mmapped && cam->num_frames > 2) {
127 /* During normal reading, the most recent
128 * frame will be read. If the current frame
129 * hasn't started reading yet, it will never
130 * be read, so mark it empty. If the buffer is
131 * mmapped, or we have few buffers, we need to
132 * wait for the user to free the buffer.
133 *
134 * NOTE: This is not entirely foolproof with 3
135 * buffers, but it would take an EXTREMELY
136 * overloaded system to cause problems (possible
137 * image data corruption). Basically, it would
138 * need to take more time to execute cpia2_read
139 * than it would for the camera to send
140 * cam->num_frames-2 frames before problems
141 * could occur.
142 */
143 cam->curbuff->status = FRAME_EMPTY;
144 }
145 cam->curbuff = cam->workbuff;
146 cam->workbuff = cam->workbuff->next;
147 DBG("Changed buffers, work:%d, current:%d\n",
148 cam->workbuff->num, cam->curbuff->num);
149 return;
150 } else {
151 DBG("Not enough data for an image.\n");
152 }
153
154 cam->workbuff->status = FRAME_ERROR;
155 return;
156}
157
158/******************************************************************************
159 *
160 * add_APPn
161 *
162 * Adds a user specified APPn record
163 *****************************************************************************/
164static void add_APPn(struct camera_data *cam)
165{
166 if(cam->APP_len > 0) {
167 cam->workbuff->data[cam->workbuff->length++] = 0xFF;
168 cam->workbuff->data[cam->workbuff->length++] = 0xE0+cam->APPn;
169 cam->workbuff->data[cam->workbuff->length++] = 0;
170 cam->workbuff->data[cam->workbuff->length++] = cam->APP_len+2;
171 memcpy(cam->workbuff->data+cam->workbuff->length,
172 cam->APP_data, cam->APP_len);
173 cam->workbuff->length += cam->APP_len;
174 }
175}
176
177/******************************************************************************
178 *
179 * add_COM
180 *
181 * Adds a user specified COM record
182 *****************************************************************************/
183static void add_COM(struct camera_data *cam)
184{
185 if(cam->COM_len > 0) {
186 cam->workbuff->data[cam->workbuff->length++] = 0xFF;
187 cam->workbuff->data[cam->workbuff->length++] = 0xFE;
188 cam->workbuff->data[cam->workbuff->length++] = 0;
189 cam->workbuff->data[cam->workbuff->length++] = cam->COM_len+2;
190 memcpy(cam->workbuff->data+cam->workbuff->length,
191 cam->COM_data, cam->COM_len);
192 cam->workbuff->length += cam->COM_len;
193 }
194}
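Both helpers above emit standard JPEG marker segments. The sketch below is editorial (not part of this patch); append_jpeg_marker() is a hypothetical helper showing the generic layout the two functions follow: 0xFF, the marker byte, then a two-byte big-endian length that counts the length field itself plus the payload.

/*
 * Editorial sketch only; not a function in this driver.
 * No bounds checking, purely for illustration.
 */
static void append_jpeg_marker(unsigned char *buf, unsigned long *len,
			       unsigned char marker,
			       const unsigned char *payload,
			       unsigned char payload_len)
{
	unsigned short seg_len = payload_len + 2;

	buf[(*len)++] = 0xFF;
	buf[(*len)++] = marker;		/* 0xE0 + n for APPn, 0xFE for COM */
	buf[(*len)++] = seg_len >> 8;	/* big-endian length, high byte */
	buf[(*len)++] = seg_len & 0xFF;	/* low byte */
	memcpy(buf + *len, payload, payload_len);
	*len += payload_len;
}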
195
196/******************************************************************************
197 *
198 * cpia2_usb_complete
199 *
200 * callback when incoming packet is received
201 *****************************************************************************/
202static void cpia2_usb_complete(struct urb *urb, struct pt_regs *regs)
203{
204 int i;
205 unsigned char *cdata;
206 static int frame_ready = false;
207 struct camera_data *cam = (struct camera_data *) urb->context;
208
209 if (urb->status!=0) {
210 if (!(urb->status == -ENOENT ||
211 urb->status == -ECONNRESET ||
212 urb->status == -ESHUTDOWN))
213 {
214 DBG("urb->status = %d!\n", urb->status);
215 }
216 DBG("Stopping streaming\n");
217 return;
218 }
219
220 if (!cam->streaming || !cam->present || cam->open_count == 0) {
221 LOG("Will now stop the streaming: streaming = %d, "
222 "present=%d, open_count=%d\n",
223 cam->streaming, cam->present, cam->open_count);
224 return;
225 }
226
227 /***
228 * Packet collater
229 ***/
230 //DBG("Collating %d packets\n", urb->number_of_packets);
231 for (i = 0; i < urb->number_of_packets; i++) {
232 u16 checksum, iso_checksum;
233 int j;
234 int n = urb->iso_frame_desc[i].actual_length;
235 int st = urb->iso_frame_desc[i].status;
236
237 if(cam->workbuff->status == FRAME_READY) {
238 struct framebuf *ptr;
239 /* Try to find an available buffer */
240 DBG("workbuff full, searching\n");
241 for (ptr = cam->workbuff->next;
242 ptr != cam->workbuff;
243 ptr = ptr->next)
244 {
245 if (ptr->status == FRAME_EMPTY) {
246 ptr->status = FRAME_READING;
247 ptr->length = 0;
248 break;
249 }
250 }
251 if (ptr == cam->workbuff)
252 break; /* No READING or EMPTY buffers left */
253
254 cam->workbuff = ptr;
255 }
256
257 if (cam->workbuff->status == FRAME_EMPTY ||
258 cam->workbuff->status == FRAME_ERROR) {
259 cam->workbuff->status = FRAME_READING;
260 cam->workbuff->length = 0;
261 }
262
263 //DBG(" Packet %d length = %d, status = %d\n", i, n, st);
264 cdata = urb->transfer_buffer + urb->iso_frame_desc[i].offset;
265
266 if (st) {
267 LOG("cpia2 data error: [%d] len=%d, status = %d\n",
268 i, n, st);
269 if(!ALLOW_CORRUPT)
270 cam->workbuff->status = FRAME_ERROR;
271 continue;
272 }
273
274 if(n<=2)
275 continue;
276
277 checksum = 0;
278 for(j=0; j<n-2; ++j)
279 checksum += cdata[j];
280 iso_checksum = cdata[j] + cdata[j+1]*256;
281 if(checksum != iso_checksum) {
282 LOG("checksum mismatch: [%d] len=%d, calculated = %x, checksum = %x\n",
283 i, n, (int)checksum, (int)iso_checksum);
284 if(!ALLOW_CORRUPT) {
285 cam->workbuff->status = FRAME_ERROR;
286 continue;
287 }
288 }
289 n -= 2;
290
291 if(cam->workbuff->status != FRAME_READING) {
292 if((0xFF == cdata[0] && 0xD8 == cdata[1]) ||
293 (0xD8 == cdata[0] && 0xFF == cdata[1] &&
294 0 != cdata[2])) {
295 /* frame is skipped, but increment total
296 * frame count anyway */
297 cam->frame_count++;
298 }
299 DBG("workbuff not reading, status=%d\n",
300 cam->workbuff->status);
301 continue;
302 }
303
304 if (cam->frame_size < cam->workbuff->length + n) {
305 ERR("buffer overflow! length: %d, n: %d\n",
306 cam->workbuff->length, n);
307 cam->workbuff->status = FRAME_ERROR;
308 if(cam->workbuff->length > cam->workbuff->max_length)
309 cam->workbuff->max_length =
310 cam->workbuff->length;
311 continue;
312 }
313
314 if (cam->workbuff->length == 0) {
315 int data_offset;
316 if ((0xD8 == cdata[0]) && (0xFF == cdata[1])) {
317 data_offset = 1;
318 } else if((0xFF == cdata[0]) && (0xD8 == cdata[1])
319 && (0xFF == cdata[2])) {
320 data_offset = 2;
321 } else {
322 DBG("Ignoring packet, not beginning!\n");
323 continue;
324 }
325 DBG("Start of frame pattern found\n");
326 do_gettimeofday(&cam->workbuff->timestamp);
327 cam->workbuff->seq = cam->frame_count++;
328 cam->workbuff->data[0] = 0xFF;
329 cam->workbuff->data[1] = 0xD8;
330 cam->workbuff->length = 2;
331 add_APPn(cam);
332 add_COM(cam);
333 memcpy(cam->workbuff->data+cam->workbuff->length,
334 cdata+data_offset, n-data_offset);
335 cam->workbuff->length += n-data_offset;
336 } else if (cam->workbuff->length > 0) {
337 memcpy(cam->workbuff->data + cam->workbuff->length,
338 cdata, n);
339 cam->workbuff->length += n;
340 }
341
342 if ((cam->workbuff->length >= 3) &&
343 (cam->workbuff->data[cam->workbuff->length - 3] == 0xFF) &&
344 (cam->workbuff->data[cam->workbuff->length - 2] == 0xD9) &&
345 (cam->workbuff->data[cam->workbuff->length - 1] == 0xFF)) {
346 frame_ready = true;
347 cam->workbuff->data[cam->workbuff->length - 1] = 0;
348 cam->workbuff->length -= 1;
349 } else if ((cam->workbuff->length >= 2) &&
350 (cam->workbuff->data[cam->workbuff->length - 2] == 0xFF) &&
351 (cam->workbuff->data[cam->workbuff->length - 1] == 0xD9)) {
352 frame_ready = true;
353 }
354
355 if (frame_ready) {
356 DBG("Workbuff image size = %d\n",cam->workbuff->length);
357 process_frame(cam);
358
359 frame_ready = false;
360
361 if (waitqueue_active(&cam->wq_stream))
362 wake_up_interruptible(&cam->wq_stream);
363 }
364 }
365
366 if(cam->streaming) {
367 /* resubmit */
368 urb->dev = cam->dev;
369 if ((i = usb_submit_urb(urb, GFP_ATOMIC)) != 0)
370 ERR("%s: usb_submit_urb ret %d!\n", __func__, i);
371 }
372}
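For reference, here is a minimal standalone sketch (editorial, not driver code; the function name is hypothetical) of the per-packet integrity check the collator above performs. It assumes the same layout the loop uses: payload bytes followed by a 16-bit little-endian byte-sum checksum in the last two bytes.

static int cpia2_packet_checksum_ok(const unsigned char *data, int len)
{
	u16 sum = 0;	/* wraps modulo 65536, like the u16 sum above */
	int i;

	if (len <= 2)	/* too short to carry both payload and checksum */
		return 0;
	for (i = 0; i < len - 2; i++)
		sum += data[i];
	return sum == (u16)(data[len - 2] | (data[len - 1] << 8));
}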
373
374/******************************************************************************
375 *
376 * configure_transfer_mode
377 *
378 *****************************************************************************/
379static int configure_transfer_mode(struct camera_data *cam, unsigned int alt)
380{
381 static unsigned char iso_regs[8][4] = {
382 {0x00, 0x00, 0x00, 0x00},
383 {0x00, 0x00, 0x00, 0x00},
384 {0xB9, 0x00, 0x00, 0x7E},
385 {0xB9, 0x00, 0x01, 0x7E},
386 {0xB9, 0x00, 0x02, 0x7E},
387 {0xB9, 0x00, 0x02, 0xFE},
388 {0xB9, 0x00, 0x03, 0x7E},
389 {0xB9, 0x00, 0x03, 0xFD}
390 };
391 struct cpia2_command cmd;
392 unsigned char reg;
393
394 if(!cam->present)
395 return -ENODEV;
396
397 /***
398 * Write the isoc registers according to the alternate selected
399 ***/
400 cmd.direction = TRANSFER_WRITE;
401 cmd.buffer.block_data[0] = iso_regs[alt][0];
402 cmd.buffer.block_data[1] = iso_regs[alt][1];
403 cmd.buffer.block_data[2] = iso_regs[alt][2];
404 cmd.buffer.block_data[3] = iso_regs[alt][3];
405 cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC;
406 cmd.start = CPIA2_VC_USB_ISOLIM;
407 cmd.reg_count = 4;
408 cpia2_send_command(cam, &cmd);
409
410 /***
411 * Enable relevant streams before starting polling.
412 * First read USB Stream Config Register.
413 ***/
414 cmd.direction = TRANSFER_READ;
415 cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC;
416 cmd.start = CPIA2_VC_USB_STRM;
417 cmd.reg_count = 1;
418 cpia2_send_command(cam, &cmd);
419 reg = cmd.buffer.block_data[0];
420
421 /* Clear iso, bulk, and int */
422 reg &= ~(CPIA2_VC_USB_STRM_BLK_ENABLE |
423 CPIA2_VC_USB_STRM_ISO_ENABLE |
424 CPIA2_VC_USB_STRM_INT_ENABLE);
425
426 if (alt == USBIF_BULK) {
427 DBG("Enabling bulk xfer\n");
428 reg |= CPIA2_VC_USB_STRM_BLK_ENABLE; /* Enable Bulk */
429 cam->xfer_mode = XFER_BULK;
430 } else if (alt >= USBIF_ISO_1) {
431 DBG("Enabling ISOC xfer\n");
432 reg |= CPIA2_VC_USB_STRM_ISO_ENABLE;
433 cam->xfer_mode = XFER_ISOC;
434 }
435
436 cmd.buffer.block_data[0] = reg;
437 cmd.direction = TRANSFER_WRITE;
438 cmd.start = CPIA2_VC_USB_STRM;
439 cmd.reg_count = 1;
440 cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC;
441 cpia2_send_command(cam, &cmd);
442
443 return 0;
444}
445
446/******************************************************************************
447 *
448 * cpia2_usb_change_streaming_alternate
449 *
450 *****************************************************************************/
451int cpia2_usb_change_streaming_alternate(struct camera_data *cam,
452 unsigned int alt)
453{
454 int ret = 0;
455
456 if(alt < USBIF_ISO_1 || alt > USBIF_ISO_6)
457 return -EINVAL;
458
459 if(alt == cam->params.camera_state.stream_mode)
460 return 0;
461
462 cpia2_usb_stream_pause(cam);
463
464 configure_transfer_mode(cam, alt);
465
466 cam->params.camera_state.stream_mode = alt;
467
468 /* Reset the camera to prevent image quality degradation */
469 cpia2_reset_camera(cam);
470
471 cpia2_usb_stream_resume(cam);
472
473 return ret;
474}
475
476/******************************************************************************
477 *
478 * set_alternate
479 *
480 *****************************************************************************/
481int set_alternate(struct camera_data *cam, unsigned int alt)
482{
483 int ret = 0;
484
485 if(alt == cam->cur_alt)
486 return 0;
487
488 if (cam->cur_alt != USBIF_CMDONLY) {
489 DBG("Changing from alt %d to %d\n", cam->cur_alt, USBIF_CMDONLY);
490 ret = usb_set_interface(cam->dev, cam->iface, USBIF_CMDONLY);
491 if (ret != 0)
492 return ret;
493 }
494 if (alt != USBIF_CMDONLY) {
495 DBG("Changing from alt %d to %d\n", USBIF_CMDONLY, alt);
496 ret = usb_set_interface(cam->dev, cam->iface, alt);
497 if (ret != 0)
498 return ret;
499 }
500
501 cam->old_alt = cam->cur_alt;
502 cam->cur_alt = alt;
503
504 return ret;
505}
506
507/******************************************************************************
508 *
509 * free_sbufs
510 *
511 * Free all cam->sbuf[]. Non-NULL .data and .urb members are assumed to be
512 * allocated. Non-NULL .urb members are also assumed to be
513 * submitted (and must therefore be killed before they are freed).
514 *****************************************************************************/
515static void free_sbufs(struct camera_data *cam)
516{
517 int i;
518
519 for (i = 0; i < NUM_SBUF; i++) {
520 if(cam->sbuf[i].urb) {
521 usb_kill_urb(cam->sbuf[i].urb);
522 usb_free_urb(cam->sbuf[i].urb);
523 cam->sbuf[i].urb = NULL;
524 }
525 if(cam->sbuf[i].data) {
526 kfree(cam->sbuf[i].data);
527 cam->sbuf[i].data = NULL;
528 }
529 }
530}
531
532/*******
533* Convenience functions
534*******/
535/****************************************************************************
536 *
537 * write_packet
538 *
539 ***************************************************************************/
540static int write_packet(struct usb_device *udev,
541 u8 request, u8 * registers, u16 start, size_t size)
542{
543 if (!registers || size <= 0)
544 return -EINVAL;
545
546 return usb_control_msg(udev,
547 usb_sndctrlpipe(udev, 0),
548 request,
549 USB_TYPE_VENDOR | USB_RECIP_DEVICE,
550 start, /* value */
551 0, /* index */
552 registers, /* buffer */
553 size,
554 HZ);
555}
556
557/****************************************************************************
558 *
559 * read_packet
560 *
561 ***************************************************************************/
562static int read_packet(struct usb_device *udev,
563 u8 request, u8 * registers, u16 start, size_t size)
564{
565 if (!registers || size <= 0)
566 return -EINVAL;
567
568 return usb_control_msg(udev,
569 usb_rcvctrlpipe(udev, 0),
570 request,
571 USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_DEVICE,
572 start, /* value */
573 0, /* index */
574 registers, /* buffer */
575 size,
576 HZ);
577}
578
579/******************************************************************************
580 *
581 * cpia2_usb_transfer_cmd
582 *
583 *****************************************************************************/
584int cpia2_usb_transfer_cmd(struct camera_data *cam,
585 void *registers,
586 u8 request, u8 start, u8 count, u8 direction)
587{
588 int err = 0;
589 struct usb_device *udev = cam->dev;
590
591 if (!udev) {
592 ERR("%s: Internal driver error: udev is NULL\n", __func__);
593 return -EINVAL;
594 }
595
596 if (!registers) {
597 ERR("%s: Internal driver error: register array is NULL\n", __func__);
598 return -EINVAL;
599 }
600
601 if (direction == TRANSFER_READ) {
602 err = read_packet(udev, request, (u8 *)registers, start, count);
603 if (err > 0)
604 err = 0;
605 } else if (direction == TRANSFER_WRITE) {
606		err = write_packet(udev, request, (u8 *)registers, start, count);
607 if (err < 0) {
608 LOG("Control message failed, err val = %d\n", err);
609 LOG("Message: request = 0x%0X, start = 0x%0X\n",
610 request, start);
611 LOG("Message: count = %d, register[0] = 0x%0X\n",
612 count, ((unsigned char *) registers)[0]);
613 } else
614			err = 0;
615 } else {
616		LOG("Unexpected transfer direction: %d\n",
617 direction);
618 return -EINVAL;
619 }
620
621 if(err != 0)
622 LOG("Unexpected error: %d\n", err);
623 return err;
624}
625
626
627/******************************************************************************
628 *
629 * submit_urbs
630 *
631 *****************************************************************************/
632static int submit_urbs(struct camera_data *cam)
633{
634 struct urb *urb;
635 int fx, err, i;
636
637 for(i=0; i<NUM_SBUF; ++i) {
638 if (cam->sbuf[i].data)
639 continue;
640 cam->sbuf[i].data =
641 kmalloc(FRAMES_PER_DESC * FRAME_SIZE_PER_DESC, GFP_KERNEL);
642 if (!cam->sbuf[i].data) {
643 return -ENOMEM;
644 }
645 }
646
647 /* We double buffer the Isoc lists, and also know the polling
648 * interval is every frame (1 == (1 << (bInterval -1))).
649 */
650 for(i=0; i<NUM_SBUF; ++i) {
651 if(cam->sbuf[i].urb) {
652 continue;
653 }
654 urb = usb_alloc_urb(FRAMES_PER_DESC, GFP_KERNEL);
655 if (!urb) {
656 return -ENOMEM;
657 }
658
659 cam->sbuf[i].urb = urb;
660 urb->dev = cam->dev;
661 urb->context = cam;
662 urb->pipe = usb_rcvisocpipe(cam->dev, 1 /*ISOC endpoint*/);
663 urb->transfer_flags = URB_ISO_ASAP;
664 urb->transfer_buffer = cam->sbuf[i].data;
665 urb->complete = cpia2_usb_complete;
666 urb->number_of_packets = FRAMES_PER_DESC;
667 urb->interval = 1;
668 urb->transfer_buffer_length =
669 FRAME_SIZE_PER_DESC * FRAMES_PER_DESC;
670
671 for (fx = 0; fx < FRAMES_PER_DESC; fx++) {
672 urb->iso_frame_desc[fx].offset =
673 FRAME_SIZE_PER_DESC * fx;
674 urb->iso_frame_desc[fx].length = FRAME_SIZE_PER_DESC;
675 }
676 }
677
678
679 /* Queue the ISO urbs, and resubmit in the completion handler */
680 for(i=0; i<NUM_SBUF; ++i) {
681 err = usb_submit_urb(cam->sbuf[i].urb, GFP_KERNEL);
682 if (err) {
683 ERR("usb_submit_urb[%d]() = %d\n", i, err);
684 return err;
685 }
686 }
687
688 return 0;
689}
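A worked example of the sizing used above (editorial note): at the USBIF_ISO_6 alternate each isochronous packet carries 1023 bytes, so one URB spans FRAMES_PER_DESC * 1023 = 10230 bytes, and urb->interval = 1 requests polling every USB frame (bInterval 1, since 1 == 1 << (1 - 1)). The hypothetical helper below just restates that arithmetic in terms of the symbols defined earlier in this file.

/* Illustrative only; not part of the driver. */
static unsigned int urb_buffer_bytes(unsigned int alt)
{
	/* bytes per isoc packet times packets (descriptors) per URB */
	return FRAMES_PER_DESC * frame_sizes[alt];
}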
690
691/******************************************************************************
692 *
693 * cpia2_usb_stream_start
694 *
695 *****************************************************************************/
696int cpia2_usb_stream_start(struct camera_data *cam, unsigned int alternate)
697{
698 int ret;
699 int old_alt;
700
701 if(cam->streaming)
702 return 0;
703
704 if (cam->flush) {
705 int i;
706 DBG("Flushing buffers\n");
707 for(i=0; i<cam->num_frames; ++i) {
708 cam->buffers[i].status = FRAME_EMPTY;
709 cam->buffers[i].length = 0;
710 }
711 cam->curbuff = &cam->buffers[0];
712 cam->workbuff = cam->curbuff->next;
713 cam->flush = false;
714 }
715
716 old_alt = cam->params.camera_state.stream_mode;
717 cam->params.camera_state.stream_mode = 0;
718 ret = cpia2_usb_change_streaming_alternate(cam, alternate);
719 if (ret < 0) {
720 int ret2;
721 ERR("cpia2_usb_change_streaming_alternate() = %d!\n", ret);
722 cam->params.camera_state.stream_mode = old_alt;
723 ret2 = set_alternate(cam, USBIF_CMDONLY);
724 if (ret2 < 0) {
725			ERR("cpia2_usb_change_streaming_alternate(%d) = %d failed; "
726			    "fallback set_alternate(USBIF_CMDONLY) also "
727			    "failed with %d.\n",
728 alternate, ret, ret2);
729 }
730 } else {
731 cam->frame_count = 0;
732 cam->streaming = 1;
733 ret = cpia2_usb_stream_resume(cam);
734 }
735 return ret;
736}
737
738/******************************************************************************
739 *
740 * cpia2_usb_stream_pause
741 *
742 *****************************************************************************/
743int cpia2_usb_stream_pause(struct camera_data *cam)
744{
745 int ret = 0;
746 if(cam->streaming) {
747 ret = set_alternate(cam, USBIF_CMDONLY);
748 free_sbufs(cam);
749 }
750 return ret;
751}
752
753/******************************************************************************
754 *
755 * cpia2_usb_stream_resume
756 *
757 *****************************************************************************/
758int cpia2_usb_stream_resume(struct camera_data *cam)
759{
760 int ret = 0;
761 if(cam->streaming) {
762 cam->first_image_seen = 0;
763 ret = set_alternate(cam, cam->params.camera_state.stream_mode);
764 if(ret == 0) {
765 ret = submit_urbs(cam);
766 }
767 }
768 return ret;
769}
770
771/******************************************************************************
772 *
773 * cpia2_usb_stream_stop
774 *
775 *****************************************************************************/
776int cpia2_usb_stream_stop(struct camera_data *cam)
777{
778 int ret;
779 ret = cpia2_usb_stream_pause(cam);
780 cam->streaming = 0;
781 configure_transfer_mode(cam, 0);
782 return ret;
783}
784
785/******************************************************************************
786 *
787 * cpia2_usb_probe
788 *
789 * Probe and initialize.
790 *****************************************************************************/
791static int cpia2_usb_probe(struct usb_interface *intf,
792 const struct usb_device_id *id)
793{
794 struct usb_device *udev = interface_to_usbdev(intf);
795 struct usb_interface_descriptor *interface;
796 struct camera_data *cam;
797 int ret;
798
799	/* Reject multi-config devices; a CPiA2 camera has exactly one configuration */
800 if (udev->descriptor.bNumConfigurations != 1)
801 return -ENODEV;
802 interface = &intf->cur_altsetting->desc;
803
804 /* If we get to this point, we found a CPiA2 camera */
805 LOG("CPiA2 USB camera found\n");
806
807 if((cam = cpia2_init_camera_struct()) == NULL)
808 return -ENOMEM;
809
810 cam->dev = udev;
811 cam->iface = interface->bInterfaceNumber;
812
813 ret = set_alternate(cam, USBIF_CMDONLY);
814 if (ret < 0) {
815 ERR("%s: usb_set_interface error (ret = %d)\n", __func__, ret);
816 kfree(cam);
817 return ret;
818 }
819
820 if ((ret = cpia2_register_camera(cam)) < 0) {
821 ERR("%s: Failed to register cpia2 camera (ret = %d)\n", __func__, ret);
822 kfree(cam);
823 return ret;
824 }
825
826
827 if((ret = cpia2_init_camera(cam)) < 0) {
828 ERR("%s: failed to initialize cpia2 camera (ret = %d)\n", __func__, ret);
829 cpia2_unregister_camera(cam);
830 kfree(cam);
831 return ret;
832 }
833 LOG(" CPiA Version: %d.%02d (%d.%d)\n",
834 cam->params.version.firmware_revision_hi,
835 cam->params.version.firmware_revision_lo,
836 cam->params.version.asic_id,
837 cam->params.version.asic_rev);
838 LOG(" CPiA PnP-ID: %04x:%04x:%04x\n",
839 cam->params.pnp_id.vendor,
840 cam->params.pnp_id.product,
841 cam->params.pnp_id.device_revision);
842 LOG(" SensorID: %d.(version %d)\n",
843 cam->params.version.sensor_flags,
844 cam->params.version.sensor_rev);
845
846 usb_set_intfdata(intf, cam);
847
848 return 0;
849}
850
851/******************************************************************************
852 *
853 * cpia2_usb_disconnect
854 *
855 *****************************************************************************/
856static void cpia2_usb_disconnect(struct usb_interface *intf)
857{
858 struct camera_data *cam = usb_get_intfdata(intf);
859 usb_set_intfdata(intf, NULL);
860 cam->present = 0;
861
862 DBG("Stopping stream\n");
863 cpia2_usb_stream_stop(cam);
864
865 DBG("Unregistering camera\n");
866 cpia2_unregister_camera(cam);
867
868 if(cam->buffers) {
869 DBG("Wakeup waiting processes\n");
870 cam->curbuff->status = FRAME_READY;
871 cam->curbuff->length = 0;
872 if (waitqueue_active(&cam->wq_stream))
873 wake_up_interruptible(&cam->wq_stream);
874 }
875
876 DBG("Releasing interface\n");
877 usb_driver_release_interface(&cpia2_driver, intf);
878
879 if (cam->open_count == 0) {
880 DBG("Freeing camera structure\n");
881 kfree(cam);
882 }
883
884 LOG("CPiA2 camera disconnected.\n");
885}
886
887
888/******************************************************************************
889 *
890 * cpia2_usb_init
891 *
892 *****************************************************************************/
893int cpia2_usb_init(void)
894{
895 return usb_register(&cpia2_driver);
896}
897
898/******************************************************************************
899 *
900 * cpia2_usb_cleanup
901 *
902 *****************************************************************************/
903void cpia2_usb_cleanup(void)
904{
905 schedule_timeout(2 * HZ);
906 usb_deregister(&cpia2_driver);
907}
diff --git a/drivers/media/video/cpia2/cpia2_v4l.c b/drivers/media/video/cpia2/cpia2_v4l.c
new file mode 100644
index 000000000000..08f8be345fa8
--- /dev/null
+++ b/drivers/media/video/cpia2/cpia2_v4l.c
@@ -0,0 +1,2079 @@
1/****************************************************************************
2 *
3 * Filename: cpia2_v4l.c
4 *
5 * Copyright 2001, STMicroelectronics, Inc.
6 * Contact: steve.miller@st.com
7 * Copyright 2001,2005, Scott J. Bertin <scottbertin@yahoo.com>
8 *
9 * Description:
10 * This is a USB driver for CPia2 based video cameras.
11 * The infrastructure of this driver is based on the cpia usb driver by
12 * Jochen Scharrlach and Johannes Erdfeldt.
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 *
28 * Stripped of 2.4 stuff ready for main kernel submit by
29 * Alan Cox <alan@redhat.com>
30 ****************************************************************************/
31
32#include <linux/version.h>
33
34#include <linux/config.h>
35
36#include <linux/module.h>
37#include <linux/time.h>
38#include <linux/sched.h>
39#include <linux/slab.h>
40#include <linux/init.h>
41#include <linux/moduleparam.h>
42
43#include "cpia2.h"
44#include "cpia2dev.h"
45
46
47//#define _CPIA2_DEBUG_
48
49#define MAKE_STRING_1(x) #x
50#define MAKE_STRING(x) MAKE_STRING_1(x)
51
52static int video_nr = -1;
53module_param(video_nr, int, 0);
54MODULE_PARM_DESC(video_nr,"video device to register (0=/dev/video0, etc)");
55
56static int buffer_size = 68*1024;
57module_param(buffer_size, int, 0);
58MODULE_PARM_DESC(buffer_size, "Size for each frame buffer in bytes (default 68k)");
59
60static int num_buffers = 3;
61module_param(num_buffers, int, 0);
62MODULE_PARM_DESC(num_buffers, "Number of frame buffers (1-"
63 MAKE_STRING(VIDEO_MAX_FRAME) ", default 3)");
64
65static int alternate = DEFAULT_ALT;
66module_param(alternate, int, 0);
67MODULE_PARM_DESC(alternate, "USB Alternate (" MAKE_STRING(USBIF_ISO_1) "-"
68 MAKE_STRING(USBIF_ISO_6) ", default "
69 MAKE_STRING(DEFAULT_ALT) ")");
70
71static int flicker_freq = 60;
72module_param(flicker_freq, int, 0);
73MODULE_PARM_DESC(flicker_freq, "Flicker frequency (" MAKE_STRING(50) " or "
74 MAKE_STRING(60) ", default "
75 MAKE_STRING(60) ")");
76
77static int flicker_mode = NEVER_FLICKER;
78module_param(flicker_mode, int, 0);
79MODULE_PARM_DESC(flicker_mode,
80		 "Flicker suppression (" MAKE_STRING(NEVER_FLICKER) " or "
81 MAKE_STRING(ANTI_FLICKER_ON) ", default "
82 MAKE_STRING(NEVER_FLICKER) ")");
83
84MODULE_AUTHOR("Steve Miller (STMicroelectronics) <steve.miller@st.com>");
85MODULE_DESCRIPTION("V4L-driver for STMicroelectronics CPiA2 based cameras");
86MODULE_SUPPORTED_DEVICE("video");
87MODULE_LICENSE("GPL");
88
89#define ABOUT "V4L-Driver for Vision CPiA2 based cameras"
90
91#ifndef VID_HARDWARE_CPIA2
92#error "VID_HARDWARE_CPIA2 should have been defined in linux/videodev.h"
93#endif
94
95struct control_menu_info {
96 int value;
97 char name[32];
98};
99
100static struct control_menu_info framerate_controls[] =
101{
102 { CPIA2_VP_FRAMERATE_6_25, "6.25 fps" },
103 { CPIA2_VP_FRAMERATE_7_5, "7.5 fps" },
104 { CPIA2_VP_FRAMERATE_12_5, "12.5 fps" },
105 { CPIA2_VP_FRAMERATE_15, "15 fps" },
106 { CPIA2_VP_FRAMERATE_25, "25 fps" },
107 { CPIA2_VP_FRAMERATE_30, "30 fps" },
108};
109#define NUM_FRAMERATE_CONTROLS (sizeof(framerate_controls)/sizeof(framerate_controls[0]))
110
111static struct control_menu_info flicker_controls[] =
112{
113 { NEVER_FLICKER, "Off" },
114 { FLICKER_50, "50 Hz" },
115 { FLICKER_60, "60 Hz" },
116};
117#define NUM_FLICKER_CONTROLS (sizeof(flicker_controls)/sizeof(flicker_controls[0]))
118
119static struct control_menu_info lights_controls[] =
120{
121 { 0, "Off" },
122 { 64, "Top" },
123 { 128, "Bottom" },
124 { 192, "Both" },
125};
126#define NUM_LIGHTS_CONTROLS (sizeof(lights_controls)/sizeof(lights_controls[0]))
127#define GPIO_LIGHTS_MASK 192
128
129static struct v4l2_queryctrl controls[] = {
130 {
131 .id = V4L2_CID_BRIGHTNESS,
132 .type = V4L2_CTRL_TYPE_INTEGER,
133 .name = "Brightness",
134 .minimum = 0,
135 .maximum = 255,
136 .step = 1,
137 .default_value = DEFAULT_BRIGHTNESS,
138 },
139 {
140 .id = V4L2_CID_CONTRAST,
141 .type = V4L2_CTRL_TYPE_INTEGER,
142 .name = "Contrast",
143 .minimum = 0,
144 .maximum = 255,
145 .step = 1,
146 .default_value = DEFAULT_CONTRAST,
147 },
148 {
149 .id = V4L2_CID_SATURATION,
150 .type = V4L2_CTRL_TYPE_INTEGER,
151 .name = "Saturation",
152 .minimum = 0,
153 .maximum = 255,
154 .step = 1,
155 .default_value = DEFAULT_SATURATION,
156 },
157 {
158 .id = V4L2_CID_HFLIP,
159 .type = V4L2_CTRL_TYPE_BOOLEAN,
160 .name = "Mirror Horizontally",
161 .minimum = 0,
162 .maximum = 1,
163 .step = 1,
164 .default_value = 0,
165 },
166 {
167 .id = V4L2_CID_VFLIP,
168 .type = V4L2_CTRL_TYPE_BOOLEAN,
169 .name = "Flip Vertically",
170 .minimum = 0,
171 .maximum = 1,
172 .step = 1,
173 .default_value = 0,
174 },
175 {
176 .id = CPIA2_CID_TARGET_KB,
177 .type = V4L2_CTRL_TYPE_INTEGER,
178 .name = "Target KB",
179 .minimum = 0,
180 .maximum = 255,
181 .step = 1,
182 .default_value = DEFAULT_TARGET_KB,
183 },
184 {
185 .id = CPIA2_CID_GPIO,
186 .type = V4L2_CTRL_TYPE_INTEGER,
187 .name = "GPIO",
188 .minimum = 0,
189 .maximum = 255,
190 .step = 1,
191 .default_value = 0,
192 },
193 {
194 .id = CPIA2_CID_FLICKER_MODE,
195 .type = V4L2_CTRL_TYPE_MENU,
196 .name = "Flicker Reduction",
197 .minimum = 0,
198 .maximum = NUM_FLICKER_CONTROLS-1,
199 .step = 1,
200 .default_value = 0,
201 },
202 {
203 .id = CPIA2_CID_FRAMERATE,
204 .type = V4L2_CTRL_TYPE_MENU,
205 .name = "Framerate",
206 .minimum = 0,
207 .maximum = NUM_FRAMERATE_CONTROLS-1,
208 .step = 1,
209 .default_value = NUM_FRAMERATE_CONTROLS-1,
210 },
211 {
212 .id = CPIA2_CID_USB_ALT,
213 .type = V4L2_CTRL_TYPE_INTEGER,
214 .name = "USB Alternate",
215 .minimum = USBIF_ISO_1,
216 .maximum = USBIF_ISO_6,
217 .step = 1,
218 .default_value = DEFAULT_ALT,
219 },
220 {
221 .id = CPIA2_CID_LIGHTS,
222 .type = V4L2_CTRL_TYPE_MENU,
223 .name = "Lights",
224 .minimum = 0,
225 .maximum = NUM_LIGHTS_CONTROLS-1,
226 .step = 1,
227 .default_value = 0,
228 },
229 {
230 .id = CPIA2_CID_RESET_CAMERA,
231 .type = V4L2_CTRL_TYPE_BUTTON,
232 .name = "Reset Camera",
233 .minimum = 0,
234 .maximum = 0,
235 .step = 0,
236 .default_value = 0,
237 },
238};
239#define NUM_CONTROLS (sizeof(controls)/sizeof(controls[0]))
240
241
242/******************************************************************************
243 *
244 * cpia2_open
245 *
246 *****************************************************************************/
247static int cpia2_open(struct inode *inode, struct file *file)
248{
249 struct video_device *dev = video_devdata(file);
250 struct camera_data *cam = video_get_drvdata(dev);
251 int retval = 0;
252
253 if (!cam) {
254 ERR("Internal error, camera_data not found!\n");
255 return -ENODEV;
256 }
257
258 if(down_interruptible(&cam->busy_lock))
259 return -ERESTARTSYS;
260
261 if(!cam->present) {
262 retval = -ENODEV;
263 goto err_return;
264 }
265
266 if (cam->open_count > 0) {
267 goto skip_init;
268 }
269
270 if (cpia2_allocate_buffers(cam)) {
271 retval = -ENOMEM;
272 goto err_return;
273 }
274
275 /* reset the camera */
276 if (cpia2_reset_camera(cam) < 0) {
277 retval = -EIO;
278 goto err_return;
279 }
280
281 cam->APP_len = 0;
282 cam->COM_len = 0;
283
284skip_init:
285 {
286 struct cpia2_fh *fh = kmalloc(sizeof(*fh),GFP_KERNEL);
287 if(!fh) {
288 retval = -ENOMEM;
289 goto err_return;
290 }
291 file->private_data = fh;
292 fh->prio = V4L2_PRIORITY_UNSET;
293 v4l2_prio_open(&cam->prio, &fh->prio);
294 fh->mmapped = 0;
295 }
296
297 ++cam->open_count;
298
299 cpia2_dbg_dump_registers(cam);
300
301err_return:
302 up(&cam->busy_lock);
303 return retval;
304}
305
306/******************************************************************************
307 *
308 * cpia2_close
309 *
310 *****************************************************************************/
311static int cpia2_close(struct inode *inode, struct file *file)
312{
313 struct video_device *dev = video_devdata(file);
314 struct camera_data *cam = video_get_drvdata(dev);
315 struct cpia2_fh *fh = file->private_data;
316
317 down(&cam->busy_lock);
318
319 if (cam->present &&
320 (cam->open_count == 1
321 || fh->prio == V4L2_PRIORITY_RECORD
322 )) {
323 cpia2_usb_stream_stop(cam);
324
325 if(cam->open_count == 1) {
326 /* save camera state for later open */
327 cpia2_save_camera_state(cam);
328
329 cpia2_set_low_power(cam);
330 cpia2_free_buffers(cam);
331 }
332 }
333
334 {
335 if(fh->mmapped)
336 cam->mmapped = 0;
337 v4l2_prio_close(&cam->prio,&fh->prio);
338 file->private_data = NULL;
339 kfree(fh);
340 }
341
342 if (--cam->open_count == 0) {
343 cpia2_free_buffers(cam);
344 if (!cam->present) {
345 video_unregister_device(dev);
346 kfree(cam);
347 }
348 }
349
350 up(&cam->busy_lock);
351
352 return 0;
353}
354
355/******************************************************************************
356 *
357 * cpia2_v4l_read
358 *
359 *****************************************************************************/
360static ssize_t cpia2_v4l_read(struct file *file, char __user *buf, size_t count,
361 loff_t *off)
362{
363 struct video_device *dev = video_devdata(file);
364 struct camera_data *cam = video_get_drvdata(dev);
365 int noblock = file->f_flags&O_NONBLOCK;
366
367 struct cpia2_fh *fh = file->private_data;
368
369 if(!cam)
370 return -EINVAL;
371
372 /* Priority check */
373 if(fh->prio != V4L2_PRIORITY_RECORD) {
374 return -EBUSY;
375 }
376
377 return cpia2_read(cam, buf, count, noblock);
378}
379
380
381/******************************************************************************
382 *
383 * cpia2_v4l_poll
384 *
385 *****************************************************************************/
386static unsigned int cpia2_v4l_poll(struct file *filp, struct poll_table_struct *wait)
387{
388 struct video_device *dev = video_devdata(filp);
389 struct camera_data *cam = video_get_drvdata(dev);
390
391 struct cpia2_fh *fh = filp->private_data;
392
393 if(!cam)
394 return POLLERR;
395
396 /* Priority check */
397 if(fh->prio != V4L2_PRIORITY_RECORD) {
398 return POLLERR;
399 }
400
401 return cpia2_poll(cam, filp, wait);
402}
403
404
405/******************************************************************************
406 *
407 * ioctl_cap_query
408 *
409 *****************************************************************************/
410static int ioctl_cap_query(void *arg, struct camera_data *cam)
411{
412 struct video_capability *vc;
413 int retval = 0;
414 vc = arg;
415
416 if (cam->params.pnp_id.product == 0x151)
417 strcpy(vc->name, "QX5 Microscope");
418 else
419 strcpy(vc->name, "CPiA2 Camera");
420
421 vc->type = VID_TYPE_CAPTURE | VID_TYPE_MJPEG_ENCODER;
422 vc->channels = 1;
423 vc->audios = 0;
424 vc->minwidth = 176; /* VIDEOSIZE_QCIF */
425 vc->minheight = 144;
426 switch (cam->params.version.sensor_flags) {
427 case CPIA2_VP_SENSOR_FLAGS_500:
428 vc->maxwidth = STV_IMAGE_VGA_COLS;
429 vc->maxheight = STV_IMAGE_VGA_ROWS;
430 break;
431 case CPIA2_VP_SENSOR_FLAGS_410:
432 vc->maxwidth = STV_IMAGE_CIF_COLS;
433 vc->maxheight = STV_IMAGE_CIF_ROWS;
434 break;
435 default:
436 return -EINVAL;
437 }
438
439 return retval;
440}
441
442/******************************************************************************
443 *
444 * ioctl_get_channel
445 *
446 *****************************************************************************/
447static int ioctl_get_channel(void *arg)
448{
449 int retval = 0;
450 struct video_channel *v;
451 v = arg;
452
453 if (v->channel != 0)
454 return -EINVAL;
455
456 v->channel = 0;
457 strcpy(v->name, "Camera");
458 v->tuners = 0;
459 v->flags = 0;
460 v->type = VIDEO_TYPE_CAMERA;
461 v->norm = 0;
462
463 return retval;
464}
465
466/******************************************************************************
467 *
468 * ioctl_set_channel
469 *
470 *****************************************************************************/
471static int ioctl_set_channel(void *arg)
472{
473 struct video_channel *v;
474 int retval = 0;
475 v = arg;
476
477 if (retval == 0 && v->channel != 0)
478 retval = -EINVAL;
479
480 return retval;
481}
482
483/******************************************************************************
484 *
485 * ioctl_set_image_prop
486 *
487 *****************************************************************************/
488static int ioctl_set_image_prop(void *arg, struct camera_data *cam)
489{
490 struct video_picture *vp;
491 int retval = 0;
492 vp = arg;
493
494 /* brightness, color, contrast need no check 0-65535 */
495 memcpy(&cam->vp, vp, sizeof(*vp));
496
497 /* update cam->params.colorParams */
498 cam->params.color_params.brightness = vp->brightness / 256;
499 cam->params.color_params.saturation = vp->colour / 256;
500 cam->params.color_params.contrast = vp->contrast / 256;
501
502 DBG("Requested params: bright 0x%X, sat 0x%X, contrast 0x%X\n",
503 cam->params.color_params.brightness,
504 cam->params.color_params.saturation,
505 cam->params.color_params.contrast);
506
507 cpia2_set_color_params(cam);
508
509 return retval;
510}
511
512static int sync(struct camera_data *cam, int frame_nr)
513{
514 struct framebuf *frame = &cam->buffers[frame_nr];
515
516 while (1) {
517 if (frame->status == FRAME_READY)
518 return 0;
519
520 if (!cam->streaming) {
521 frame->status = FRAME_READY;
522 frame->length = 0;
523 return 0;
524 }
525
526 up(&cam->busy_lock);
527 wait_event_interruptible(cam->wq_stream,
528 !cam->streaming ||
529 frame->status == FRAME_READY);
530 down(&cam->busy_lock);
531 if (signal_pending(current))
532 return -ERESTARTSYS;
533 if(!cam->present)
534 return -ENOTTY;
535 }
536}
537
538/******************************************************************************
539 *
540 * ioctl_set_window_size
541 *
542 *****************************************************************************/
543static int ioctl_set_window_size(void *arg, struct camera_data *cam,
544 struct cpia2_fh *fh)
545{
546 /* copy_from_user, check validity, copy to internal structure */
547 struct video_window *vw;
548 int frame, err;
549 vw = arg;
550
551 if (vw->clipcount != 0) /* clipping not supported */
552 return -EINVAL;
553
554 if (vw->clips != NULL) /* clipping not supported */
555 return -EINVAL;
556
557 /* Ensure that only this process can change the format. */
558 err = v4l2_prio_change(&cam->prio, &fh->prio, V4L2_PRIORITY_RECORD);
559 if(err != 0)
560 return err;
561
562 cam->pixelformat = V4L2_PIX_FMT_JPEG;
563
564 /* Be sure to supply the Huffman tables, this isn't MJPEG */
565 cam->params.compression.inhibit_htables = 0;
566
567	/* Set the video window to something smaller than or equal to
568	 * the size requested by the user.
569 */
570 DBG("Requested width = %d, height = %d\n", vw->width, vw->height);
571 if (vw->width != cam->vw.width || vw->height != cam->vw.height) {
572 cam->vw.width = vw->width;
573 cam->vw.height = vw->height;
574 cam->params.roi.width = vw->width;
575 cam->params.roi.height = vw->height;
576 cpia2_set_format(cam);
577 }
578
579 for (frame = 0; frame < cam->num_frames; ++frame) {
580 if (cam->buffers[frame].status == FRAME_READING)
581 if ((err = sync(cam, frame)) < 0)
582 return err;
583
584 cam->buffers[frame].status = FRAME_EMPTY;
585 }
586
587 return 0;
588}
589
590/******************************************************************************
591 *
592 * ioctl_get_mbuf
593 *
594 *****************************************************************************/
595static int ioctl_get_mbuf(void *arg, struct camera_data *cam)
596{
597 struct video_mbuf *vm;
598 int i;
599 vm = arg;
600
601 memset(vm, 0, sizeof(*vm));
602 vm->size = cam->frame_size*cam->num_frames;
603 vm->frames = cam->num_frames;
604 for (i = 0; i < cam->num_frames; i++)
605 vm->offsets[i] = cam->frame_size * i;
606
607 return 0;
608}
609
610/******************************************************************************
611 *
612 * ioctl_mcapture
613 *
614 *****************************************************************************/
615static int ioctl_mcapture(void *arg, struct camera_data *cam,
616 struct cpia2_fh *fh)
617{
618 struct video_mmap *vm;
619 int video_size, err;
620 vm = arg;
621
622 if (vm->frame < 0 || vm->frame >= cam->num_frames)
623 return -EINVAL;
624
625 /* set video size */
626 video_size = cpia2_match_video_size(vm->width, vm->height);
627	if (video_size < 0) {
628 return -EINVAL;
629 }
630
631 /* Ensure that only this process can change the format. */
632 err = v4l2_prio_change(&cam->prio, &fh->prio, V4L2_PRIORITY_RECORD);
633 if(err != 0)
634 return err;
635
636 if (video_size != cam->video_size) {
637 cam->video_size = video_size;
638 cam->params.roi.width = vm->width;
639 cam->params.roi.height = vm->height;
640 cpia2_set_format(cam);
641 }
642
643 if (cam->buffers[vm->frame].status == FRAME_READING)
644 if ((err=sync(cam, vm->frame)) < 0)
645 return err;
646
647 cam->buffers[vm->frame].status = FRAME_EMPTY;
648
649 return cpia2_usb_stream_start(cam,cam->params.camera_state.stream_mode);
650}
651
652/******************************************************************************
653 *
654 * ioctl_sync
655 *
656 *****************************************************************************/
657static int ioctl_sync(void *arg, struct camera_data *cam)
658{
659 int frame;
660
661 frame = *(int*)arg;
662
663 if (frame < 0 || frame >= cam->num_frames)
664 return -EINVAL;
665
666 return sync(cam, frame);
667}
668
669
670/******************************************************************************
671 *
672 * ioctl_set_gpio
673 *
674 *****************************************************************************/
675
676static int ioctl_set_gpio(void *arg, struct camera_data *cam)
677{
678 __u32 gpio_val;
679
680 gpio_val = *(__u32*) arg;
681
682 if (gpio_val &~ 0xFFU)
683 return -EINVAL;
684
685 return cpia2_set_gpio(cam, (unsigned char)gpio_val);
686}
687
688/******************************************************************************
689 *
690 * ioctl_querycap
691 *
692 * V4L2 device capabilities
693 *
694 *****************************************************************************/
695
696static int ioctl_querycap(void *arg, struct camera_data *cam)
697{
698 struct v4l2_capability *vc = arg;
699
700 memset(vc, 0, sizeof(*vc));
701 strcpy(vc->driver, "cpia2");
702
703 if (cam->params.pnp_id.product == 0x151)
704 strcpy(vc->card, "QX5 Microscope");
705 else
706 strcpy(vc->card, "CPiA2 Camera");
707 switch (cam->params.pnp_id.device_type) {
708 case DEVICE_STV_672:
709 strcat(vc->card, " (672/");
710 break;
711 case DEVICE_STV_676:
712 strcat(vc->card, " (676/");
713 break;
714 default:
715 strcat(vc->card, " (???/");
716 break;
717 }
718 switch (cam->params.version.sensor_flags) {
719 case CPIA2_VP_SENSOR_FLAGS_404:
720 strcat(vc->card, "404)");
721 break;
722 case CPIA2_VP_SENSOR_FLAGS_407:
723 strcat(vc->card, "407)");
724 break;
725 case CPIA2_VP_SENSOR_FLAGS_409:
726 strcat(vc->card, "409)");
727 break;
728 case CPIA2_VP_SENSOR_FLAGS_410:
729 strcat(vc->card, "410)");
730 break;
731 case CPIA2_VP_SENSOR_FLAGS_500:
732 strcat(vc->card, "500)");
733 break;
734 default:
735 strcat(vc->card, "???)");
736 break;
737 }
738
739 if (usb_make_path(cam->dev, vc->bus_info, sizeof(vc->bus_info)) <0)
740 memset(vc->bus_info,0, sizeof(vc->bus_info));
741
742 vc->version = KERNEL_VERSION(CPIA2_MAJ_VER, CPIA2_MIN_VER,
743 CPIA2_PATCH_VER);
744
745 vc->capabilities = V4L2_CAP_VIDEO_CAPTURE |
746 V4L2_CAP_READWRITE |
747 V4L2_CAP_STREAMING;
748
749 return 0;
750}
751
752/******************************************************************************
753 *
754 * ioctl_input
755 *
756 * V4L2 input get/set/enumerate
757 *
758 *****************************************************************************/
759
760static int ioctl_input(unsigned int ioctl_nr,void *arg,struct camera_data *cam)
761{
762 struct v4l2_input *i = arg;
763
764	if(ioctl_nr != VIDIOC_G_INPUT) {
765 if (i->index != 0)
766 return -EINVAL;
767 }
768
769 memset(i, 0, sizeof(*i));
770 strcpy(i->name, "Camera");
771 i->type = V4L2_INPUT_TYPE_CAMERA;
772
773 return 0;
774}
775
776/******************************************************************************
777 *
778 * ioctl_enum_fmt
779 *
780 * V4L2 format enumerate
781 *
782 *****************************************************************************/
783
784static int ioctl_enum_fmt(void *arg,struct camera_data *cam)
785{
786 struct v4l2_fmtdesc *f = arg;
787 int index = f->index;
788
789 if (index < 0 || index > 1)
790 return -EINVAL;
791
792 memset(f, 0, sizeof(*f));
793 f->index = index;
794 f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
795 f->flags = V4L2_FMT_FLAG_COMPRESSED;
796 switch(index) {
797 case 0:
798 strcpy(f->description, "MJPEG");
799 f->pixelformat = V4L2_PIX_FMT_MJPEG;
800 break;
801 case 1:
802 strcpy(f->description, "JPEG");
803 f->pixelformat = V4L2_PIX_FMT_JPEG;
804 break;
805 default:
806 return -EINVAL;
807 }
808
809 return 0;
810}
811
812/******************************************************************************
813 *
814 * ioctl_try_fmt
815 *
816 * V4L2 format try
817 *
818 *****************************************************************************/
819
820static int ioctl_try_fmt(void *arg,struct camera_data *cam)
821{
822 struct v4l2_format *f = arg;
823
824 if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
825 return -EINVAL;
826
827 if (f->fmt.pix.pixelformat != V4L2_PIX_FMT_MJPEG &&
828 f->fmt.pix.pixelformat != V4L2_PIX_FMT_JPEG)
829 return -EINVAL;
830
831 f->fmt.pix.field = V4L2_FIELD_NONE;
832 f->fmt.pix.bytesperline = 0;
833 f->fmt.pix.sizeimage = cam->frame_size;
834 f->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG;
835 f->fmt.pix.priv = 0;
836
837 switch (cpia2_match_video_size(f->fmt.pix.width, f->fmt.pix.height)) {
838 case VIDEOSIZE_VGA:
839 f->fmt.pix.width = 640;
840 f->fmt.pix.height = 480;
841 break;
842 case VIDEOSIZE_CIF:
843 f->fmt.pix.width = 352;
844 f->fmt.pix.height = 288;
845 break;
846 case VIDEOSIZE_QVGA:
847 f->fmt.pix.width = 320;
848 f->fmt.pix.height = 240;
849 break;
850 case VIDEOSIZE_288_216:
851 f->fmt.pix.width = 288;
852 f->fmt.pix.height = 216;
853 break;
854 case VIDEOSIZE_256_192:
855 f->fmt.pix.width = 256;
856 f->fmt.pix.height = 192;
857 break;
858 case VIDEOSIZE_224_168:
859 f->fmt.pix.width = 224;
860 f->fmt.pix.height = 168;
861 break;
862 case VIDEOSIZE_192_144:
863 f->fmt.pix.width = 192;
864 f->fmt.pix.height = 144;
865 break;
866 case VIDEOSIZE_QCIF:
867 default:
868 f->fmt.pix.width = 176;
869 f->fmt.pix.height = 144;
870 break;
871 }
872
873 return 0;
874}
875
876/******************************************************************************
877 *
878 * ioctl_set_fmt
879 *
880 * V4L2 format set
881 *
882 *****************************************************************************/
883
884static int ioctl_set_fmt(void *arg,struct camera_data *cam, struct cpia2_fh *fh)
885{
886 struct v4l2_format *f = arg;
887 int err, frame;
888
889 err = ioctl_try_fmt(arg, cam);
890 if(err != 0)
891 return err;
892
893 /* Ensure that only this process can change the format. */
894 err = v4l2_prio_change(&cam->prio, &fh->prio, V4L2_PRIORITY_RECORD);
895 if(err != 0) {
896 return err;
897 }
898
899 cam->pixelformat = f->fmt.pix.pixelformat;
900
901 /* NOTE: This should be set to 1 for MJPEG, but some apps don't handle
902 * the missing Huffman table properly. */
903 cam->params.compression.inhibit_htables = 0;
904 /*f->fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG;*/
905
906	/* Set the video window to something smaller than or equal to
907	 * the size requested by the user.
908 */
909 DBG("Requested width = %d, height = %d\n",
910 f->fmt.pix.width, f->fmt.pix.height);
911 if (f->fmt.pix.width != cam->vw.width ||
912 f->fmt.pix.height != cam->vw.height) {
913 cam->vw.width = f->fmt.pix.width;
914 cam->vw.height = f->fmt.pix.height;
915 cam->params.roi.width = f->fmt.pix.width;
916 cam->params.roi.height = f->fmt.pix.height;
917 cpia2_set_format(cam);
918 }
919
920 for (frame = 0; frame < cam->num_frames; ++frame) {
921 if (cam->buffers[frame].status == FRAME_READING)
922 if ((err = sync(cam, frame)) < 0)
923 return err;
924
925 cam->buffers[frame].status = FRAME_EMPTY;
926 }
927
928 return 0;
929}
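To show how the TRY_FMT/S_FMT handlers above are reached in practice, here is a hedged userspace sketch (not part of the driver; the device node path and the helper name are illustrative assumptions) that requests JPEG VGA capture through the standard V4L2 VIDIOC_S_FMT ioctl.

/* Userspace example only; never built as part of this module. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int request_jpeg_vga(const char *node)
{
	struct v4l2_format fmt;
	int fd = open(node, O_RDWR);	/* e.g. "/dev/video0" (assumed) */

	if (fd < 0)
		return -1;
	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	fmt.fmt.pix.width = 640;	/* the driver rounds to a supported size */
	fmt.fmt.pix.height = 480;
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_JPEG;
	if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}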
930
931/******************************************************************************
932 *
933 * ioctl_get_fmt
934 *
935 * V4L2 format get
936 *
937 *****************************************************************************/
938
939static int ioctl_get_fmt(void *arg,struct camera_data *cam)
940{
941 struct v4l2_format *f = arg;
942
943 if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
944 return -EINVAL;
945
946 f->fmt.pix.width = cam->vw.width;
947 f->fmt.pix.height = cam->vw.height;
948 f->fmt.pix.pixelformat = cam->pixelformat;
949 f->fmt.pix.field = V4L2_FIELD_NONE;
950 f->fmt.pix.bytesperline = 0;
951 f->fmt.pix.sizeimage = cam->frame_size;
952 f->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG;
953 f->fmt.pix.priv = 0;
954
955 return 0;
956}
957
958/******************************************************************************
959 *
960 * ioctl_cropcap
961 *
962 * V4L2 query cropping capabilities
963 * NOTE: cropping is currently disabled
964 *
965 *****************************************************************************/
966
967static int ioctl_cropcap(void *arg,struct camera_data *cam)
968{
969 struct v4l2_cropcap *c = arg;
970
971 if (c->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
972 return -EINVAL;
973
974 c->bounds.left = 0;
975 c->bounds.top = 0;
976 c->bounds.width = cam->vw.width;
977 c->bounds.height = cam->vw.height;
978 c->defrect.left = 0;
979 c->defrect.top = 0;
980 c->defrect.width = cam->vw.width;
981 c->defrect.height = cam->vw.height;
982 c->pixelaspect.numerator = 1;
983 c->pixelaspect.denominator = 1;
984
985 return 0;
986}
987
988/******************************************************************************
989 *
990 * ioctl_queryctrl
991 *
992 * V4L2 query possible control variables
993 *
994 *****************************************************************************/
995
996static int ioctl_queryctrl(void *arg,struct camera_data *cam)
997{
998 struct v4l2_queryctrl *c = arg;
999 int i;
1000
1001 for(i=0; i<NUM_CONTROLS; ++i) {
1002 if(c->id == controls[i].id) {
1003 memcpy(c, controls+i, sizeof(*c));
1004 break;
1005 }
1006 }
1007
1008 if(i == NUM_CONTROLS)
1009 return -EINVAL;
1010
1011 /* Some devices have additional limitations */
1012 switch(c->id) {
1013 case V4L2_CID_BRIGHTNESS:
1014 /***
1015 * Don't let the register be set to zero - bug in VP4
1016 * flash of full brightness
1017 ***/
1018 if (cam->params.pnp_id.device_type == DEVICE_STV_672)
1019 c->minimum = 1;
1020 break;
1021 case V4L2_CID_VFLIP:
1022 // VP5 Only
1023 if(cam->params.pnp_id.device_type == DEVICE_STV_672)
1024 c->flags |= V4L2_CTRL_FLAG_DISABLED;
1025 break;
1026 case CPIA2_CID_FRAMERATE:
1027 if(cam->params.pnp_id.device_type == DEVICE_STV_672 &&
1028 cam->params.version.sensor_flags==CPIA2_VP_SENSOR_FLAGS_500){
1029 // Maximum 15fps
1030 int i;
1031 for(i=0; i<c->maximum; ++i) {
1032 if(framerate_controls[i].value ==
1033 CPIA2_VP_FRAMERATE_15) {
1034 c->maximum = i;
1035 c->default_value = i;
1036 }
1037 }
1038 }
1039 break;
1040 case CPIA2_CID_FLICKER_MODE:
1041 // Flicker control only valid for 672.
1042 if(cam->params.pnp_id.device_type != DEVICE_STV_672)
1043 c->flags |= V4L2_CTRL_FLAG_DISABLED;
1044 break;
1045 case CPIA2_CID_LIGHTS:
1046 // Light control only valid for the QX5 Microscope.
1047 if(cam->params.pnp_id.product != 0x151)
1048 c->flags |= V4L2_CTRL_FLAG_DISABLED;
1049 break;
1050 default:
1051 break;
1052 }
1053
1054 return 0;
1055}
1056
1057/******************************************************************************
1058 *
1059 * ioctl_querymenu
1060 *
1061 * V4L2 query possible control variables
1062 *
1063 *****************************************************************************/
1064
1065static int ioctl_querymenu(void *arg,struct camera_data *cam)
1066{
1067 struct v4l2_querymenu *m = arg;
1068
1069 memset(m->name, 0, sizeof(m->name));
1070 m->reserved = 0;
1071
1072 switch(m->id) {
1073 case CPIA2_CID_FLICKER_MODE:
1074 if(m->index < 0 || m->index >= NUM_FLICKER_CONTROLS)
1075 return -EINVAL;
1076
1077 strcpy(m->name, flicker_controls[m->index].name);
1078 break;
1079 case CPIA2_CID_FRAMERATE:
1080 {
1081 int maximum = NUM_FRAMERATE_CONTROLS - 1;
1082 if(cam->params.pnp_id.device_type == DEVICE_STV_672 &&
1083 cam->params.version.sensor_flags==CPIA2_VP_SENSOR_FLAGS_500){
1084 // Maximum 15fps
1085 int i;
1086 for(i=0; i<maximum; ++i) {
1087 if(framerate_controls[i].value ==
1088 CPIA2_VP_FRAMERATE_15)
1089 maximum = i;
1090 }
1091 }
1092 if(m->index < 0 || m->index > maximum)
1093 return -EINVAL;
1094
1095 strcpy(m->name, framerate_controls[m->index].name);
1096 break;
1097 }
1098 case CPIA2_CID_LIGHTS:
1099 if(m->index < 0 || m->index >= NUM_LIGHTS_CONTROLS)
1100 return -EINVAL;
1101
1102 strcpy(m->name, lights_controls[m->index].name);
1103 break;
1104 default:
1105 return -EINVAL;
1106 }
1107
1108 return 0;
1109}
1110
1111/******************************************************************************
1112 *
1113 * ioctl_g_ctrl
1114 *
1115 * V4L2 get the value of a control variable
1116 *
1117 *****************************************************************************/
1118
1119static int ioctl_g_ctrl(void *arg,struct camera_data *cam)
1120{
1121 struct v4l2_control *c = arg;
1122
1123 switch(c->id) {
1124 case V4L2_CID_BRIGHTNESS:
1125 cpia2_do_command(cam, CPIA2_CMD_GET_VP_BRIGHTNESS,
1126 TRANSFER_READ, 0);
1127 c->value = cam->params.color_params.brightness;
1128 break;
1129 case V4L2_CID_CONTRAST:
1130 cpia2_do_command(cam, CPIA2_CMD_GET_CONTRAST,
1131 TRANSFER_READ, 0);
1132 c->value = cam->params.color_params.contrast;
1133 break;
1134 case V4L2_CID_SATURATION:
1135 cpia2_do_command(cam, CPIA2_CMD_GET_VP_SATURATION,
1136 TRANSFER_READ, 0);
1137 c->value = cam->params.color_params.saturation;
1138 break;
1139 case V4L2_CID_HFLIP:
1140 cpia2_do_command(cam, CPIA2_CMD_GET_USER_EFFECTS,
1141 TRANSFER_READ, 0);
1142 c->value = (cam->params.vp_params.user_effects &
1143 CPIA2_VP_USER_EFFECTS_MIRROR) != 0;
1144 break;
1145 case V4L2_CID_VFLIP:
1146 cpia2_do_command(cam, CPIA2_CMD_GET_USER_EFFECTS,
1147 TRANSFER_READ, 0);
1148 c->value = (cam->params.vp_params.user_effects &
1149 CPIA2_VP_USER_EFFECTS_FLIP) != 0;
1150 break;
1151 case CPIA2_CID_TARGET_KB:
1152 c->value = cam->params.vc_params.target_kb;
1153 break;
1154 case CPIA2_CID_GPIO:
1155 cpia2_do_command(cam, CPIA2_CMD_GET_VP_GPIO_DATA,
1156 TRANSFER_READ, 0);
1157 c->value = cam->params.vp_params.gpio_data;
1158 break;
1159 case CPIA2_CID_FLICKER_MODE:
1160 {
1161 int i, mode;
1162 cpia2_do_command(cam, CPIA2_CMD_GET_FLICKER_MODES,
1163 TRANSFER_READ, 0);
1164 if(cam->params.flicker_control.cam_register &
1165 CPIA2_VP_FLICKER_MODES_NEVER_FLICKER) {
1166 mode = NEVER_FLICKER;
1167 } else {
1168 if(cam->params.flicker_control.cam_register &
1169 CPIA2_VP_FLICKER_MODES_50HZ) {
1170 mode = FLICKER_50;
1171 } else {
1172 mode = FLICKER_60;
1173 }
1174 }
1175 for(i=0; i<NUM_FLICKER_CONTROLS; i++) {
1176 if(flicker_controls[i].value == mode) {
1177 c->value = i;
1178 break;
1179 }
1180 }
1181 if(i == NUM_FLICKER_CONTROLS)
1182 return -EINVAL;
1183 break;
1184 }
1185 case CPIA2_CID_FRAMERATE:
1186 {
1187 int maximum = NUM_FRAMERATE_CONTROLS - 1;
1188 int i;
1189 for(i=0; i<= maximum; i++) {
1190 if(cam->params.vp_params.frame_rate ==
1191 framerate_controls[i].value)
1192 break;
1193 }
1194 if(i > maximum)
1195 return -EINVAL;
1196 c->value = i;
1197 break;
1198 }
1199 case CPIA2_CID_USB_ALT:
1200 c->value = cam->params.camera_state.stream_mode;
1201 break;
1202 case CPIA2_CID_LIGHTS:
1203 {
1204 int i;
1205 cpia2_do_command(cam, CPIA2_CMD_GET_VP_GPIO_DATA,
1206 TRANSFER_READ, 0);
1207 for(i=0; i<NUM_LIGHTS_CONTROLS; i++) {
1208 if((cam->params.vp_params.gpio_data&GPIO_LIGHTS_MASK) ==
1209 lights_controls[i].value) {
1210 break;
1211 }
1212 }
1213 if(i == NUM_LIGHTS_CONTROLS)
1214 return -EINVAL;
1215 c->value = i;
1216 break;
1217 }
1218 case CPIA2_CID_RESET_CAMERA:
1219 return -EINVAL;
1220 default:
1221 return -EINVAL;
1222 }
1223
1224 DBG("Get control id:%d, value:%d\n", c->id, c->value);
1225
1226 return 0;
1227}
1228
1229/******************************************************************************
1230 *
1231 * ioctl_s_ctrl
1232 *
1233 * V4L2 set the value of a control variable
1234 *
1235 *****************************************************************************/
1236
1237static int ioctl_s_ctrl(void *arg,struct camera_data *cam)
1238{
1239 struct v4l2_control *c = arg;
1240 int i;
1241 int retval = 0;
1242
1243 DBG("Set control id:%d, value:%d\n", c->id, c->value);
1244
1245 /* Check that the value is in range */
1246 for(i=0; i<NUM_CONTROLS; i++) {
1247 if(c->id == controls[i].id) {
1248 if(c->value < controls[i].minimum ||
1249 c->value > controls[i].maximum) {
1250 return -EINVAL;
1251 }
1252 break;
1253 }
1254 }
1255 if(i == NUM_CONTROLS)
1256 return -EINVAL;
1257
1258 switch(c->id) {
1259 case V4L2_CID_BRIGHTNESS:
1260 cpia2_set_brightness(cam, c->value);
1261 break;
1262 case V4L2_CID_CONTRAST:
1263 cpia2_set_contrast(cam, c->value);
1264 break;
1265 case V4L2_CID_SATURATION:
1266 cpia2_set_saturation(cam, c->value);
1267 break;
1268 case V4L2_CID_HFLIP:
1269 cpia2_set_property_mirror(cam, c->value);
1270 break;
1271 case V4L2_CID_VFLIP:
1272 cpia2_set_property_flip(cam, c->value);
1273 break;
1274 case CPIA2_CID_TARGET_KB:
1275 retval = cpia2_set_target_kb(cam, c->value);
1276 break;
1277 case CPIA2_CID_GPIO:
1278 retval = cpia2_set_gpio(cam, c->value);
1279 break;
1280 case CPIA2_CID_FLICKER_MODE:
1281 retval = cpia2_set_flicker_mode(cam,
1282 flicker_controls[c->value].value);
1283 break;
1284 case CPIA2_CID_FRAMERATE:
1285 retval = cpia2_set_fps(cam, framerate_controls[c->value].value);
1286 break;
1287 case CPIA2_CID_USB_ALT:
1288 retval = cpia2_usb_change_streaming_alternate(cam, c->value);
1289 break;
1290 case CPIA2_CID_LIGHTS:
1291 retval = cpia2_set_gpio(cam, lights_controls[c->value].value);
1292 break;
1293 case CPIA2_CID_RESET_CAMERA:
1294 cpia2_usb_stream_pause(cam);
1295 cpia2_reset_camera(cam);
1296 cpia2_usb_stream_resume(cam);
1297 break;
1298 default:
1299 retval = -EINVAL;
1300 }
1301
1302 return retval;
1303}
1304
1305/******************************************************************************
1306 *
1307 * ioctl_g_jpegcomp
1308 *
1309 * V4L2 get the JPEG compression parameters
1310 *
1311 *****************************************************************************/
1312
1313static int ioctl_g_jpegcomp(void *arg,struct camera_data *cam)
1314{
1315 struct v4l2_jpegcompression *parms = arg;
1316
1317 memset(parms, 0, sizeof(*parms));
1318
1319 parms->quality = 80; // TODO: Can this be made meaningful?
1320
1321 parms->jpeg_markers = V4L2_JPEG_MARKER_DQT | V4L2_JPEG_MARKER_DRI;
1322 if(!cam->params.compression.inhibit_htables) {
1323 parms->jpeg_markers |= V4L2_JPEG_MARKER_DHT;
1324 }
1325
1326 parms->APPn = cam->APPn;
1327 parms->APP_len = cam->APP_len;
1328 if(cam->APP_len > 0) {
1329 memcpy(parms->APP_data, cam->APP_data, cam->APP_len);
1330 parms->jpeg_markers |= V4L2_JPEG_MARKER_APP;
1331 }
1332
1333 parms->COM_len = cam->COM_len;
1334 if(cam->COM_len > 0) {
1335 memcpy(parms->COM_data, cam->COM_data, cam->COM_len);
1336 parms->jpeg_markers |= V4L2_JPEG_MARKER_COM;
1337 }
1338
1339 DBG("G_JPEGCOMP APP_len:%d COM_len:%d\n",
1340 parms->APP_len, parms->COM_len);
1341
1342 return 0;
1343}
1344
1345/******************************************************************************
1346 *
1347 * ioctl_s_jpegcomp
1348 *
1349 * V4L2 set the JPEG compression parameters
1350 * NOTE: quality and some jpeg_markers are ignored.
1351 *
1352 *****************************************************************************/
1353
1354static int ioctl_s_jpegcomp(void *arg,struct camera_data *cam)
1355{
1356 struct v4l2_jpegcompression *parms = arg;
1357
1358 DBG("S_JPEGCOMP APP_len:%d COM_len:%d\n",
1359 parms->APP_len, parms->COM_len);
1360
1361 cam->params.compression.inhibit_htables =
1362 !(parms->jpeg_markers & V4L2_JPEG_MARKER_DHT);
1363
1364 if(parms->APP_len != 0) {
1365 if(parms->APP_len > 0 &&
1366 parms->APP_len <= sizeof(cam->APP_data) &&
1367 parms->APPn >= 0 && parms->APPn <= 15) {
1368 cam->APPn = parms->APPn;
1369 cam->APP_len = parms->APP_len;
1370 memcpy(cam->APP_data, parms->APP_data, parms->APP_len);
1371 } else {
1372 LOG("Bad APPn Params n=%d len=%d\n",
1373 parms->APPn, parms->APP_len);
1374 return -EINVAL;
1375 }
1376 } else {
1377 cam->APP_len = 0;
1378 }
1379
1380 if(parms->COM_len != 0) {
1381 if(parms->COM_len > 0 &&
1382 parms->COM_len <= sizeof(cam->COM_data)) {
1383 cam->COM_len = parms->COM_len;
1384 memcpy(cam->COM_data, parms->COM_data, parms->COM_len);
1385 } else {
1386 LOG("Bad COM_len=%d\n", parms->COM_len);
1387 return -EINVAL;
1388 }
1389 }
1390
1391 return 0;
1392}
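
For illustration, a hedged user-space sketch (not part of this patch) of how an application could exercise the two JPEG-compression ioctls handled above; the file descriptor and the APP0 payload are made-up assumptions, and error checking is omitted.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static void example_set_jpeg_params(int fd)
{
	struct v4l2_jpegcompression jc;
	static const char app0[] = "example APP0 payload";

	memset(&jc, 0, sizeof(jc));
	/* Ask the driver to emit Huffman tables; quality is ignored by this driver. */
	jc.jpeg_markers = V4L2_JPEG_MARKER_DHT;
	jc.APPn = 0;
	jc.APP_len = sizeof(app0);
	memcpy(jc.APP_data, app0, sizeof(app0));
	ioctl(fd, VIDIOC_S_JPEGCOMP, &jc);

	/* Read the settings back; APP/COM markers are reported when data is present. */
	ioctl(fd, VIDIOC_G_JPEGCOMP, &jc);
}
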
1393
1394/******************************************************************************
1395 *
1396 * ioctl_reqbufs
1397 *
1398 * V4L2 Initiate memory mapping.
1399 * NOTE: The user's request is ignored. For now the buffers are fixed.
1400 *
1401 *****************************************************************************/
1402
1403static int ioctl_reqbufs(void *arg,struct camera_data *cam)
1404{
1405 struct v4l2_requestbuffers *req = arg;
1406
1407 if(req->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
1408 req->memory != V4L2_MEMORY_MMAP)
1409 return -EINVAL;
1410
1411 DBG("REQBUFS requested:%d returning:%d\n", req->count, cam->num_frames);
1412 req->count = cam->num_frames;
1413 memset(&req->reserved, 0, sizeof(req->reserved));
1414
1415 return 0;
1416}
1417
1418/******************************************************************************
1419 *
1420 * ioctl_querybuf
1421 *
1422 * V4L2 Query memory buffer status.
1423 *
1424 *****************************************************************************/
1425
1426static int ioctl_querybuf(void *arg,struct camera_data *cam)
1427{
1428 struct v4l2_buffer *buf = arg;
1429
1430 if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
1431 buf->index > cam->num_frames)
1432 return -EINVAL;
1433
1434 buf->m.offset = cam->buffers[buf->index].data - cam->frame_buffer;
1435 buf->length = cam->frame_size;
1436
1437 buf->memory = V4L2_MEMORY_MMAP;
1438
1439 if(cam->mmapped)
1440 buf->flags = V4L2_BUF_FLAG_MAPPED;
1441 else
1442 buf->flags = 0;
1443
1444 switch (cam->buffers[buf->index].status) {
1445 case FRAME_EMPTY:
1446 case FRAME_ERROR:
1447 case FRAME_READING:
1448 buf->bytesused = 0;
1449 buf->flags = V4L2_BUF_FLAG_QUEUED;
1450 break;
1451 case FRAME_READY:
1452 buf->bytesused = cam->buffers[buf->index].length;
1453 buf->timestamp = cam->buffers[buf->index].timestamp;
1454 buf->sequence = cam->buffers[buf->index].seq;
1455 buf->flags = V4L2_BUF_FLAG_DONE;
1456 break;
1457 }
1458
1459 DBG("QUERYBUF index:%d offset:%d flags:%d seq:%d bytesused:%d\n",
1460 buf->index, buf->m.offset, buf->flags, buf->sequence,
1461 buf->bytesused);
1462
1463 return 0;
1464}
1465
1466/******************************************************************************
1467 *
1468 * ioctl_qbuf
1469 *
1470 * V4L2 User is handing a finished buffer back to the driver (queue for capture)
1471 *
1472 *****************************************************************************/
1473
1474static int ioctl_qbuf(void *arg,struct camera_data *cam)
1475{
1476 struct v4l2_buffer *buf = arg;
1477
1478 if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
1479 buf->memory != V4L2_MEMORY_MMAP ||
1480 buf->index > cam->num_frames)
1481 return -EINVAL;
1482
1483 DBG("QBUF #%d\n", buf->index);
1484
1485 if(cam->buffers[buf->index].status == FRAME_READY)
1486 cam->buffers[buf->index].status = FRAME_EMPTY;
1487
1488 return 0;
1489}
1490
1491/******************************************************************************
1492 *
1493 * find_earliest_filled_buffer
1494 *
1495 * Helper for ioctl_dqbuf. Find the next ready buffer.
1496 *
1497 *****************************************************************************/
1498
1499static int find_earliest_filled_buffer(struct camera_data *cam)
1500{
1501 int i;
1502 int found = -1;
1503 for (i=0; i<cam->num_frames; i++) {
1504 if(cam->buffers[i].status == FRAME_READY) {
1505 if(found < 0) {
1506 found = i;
1507 } else {
1508 /* find which buffer is earlier */
1509 struct timeval *tv1, *tv2;
1510 tv1 = &cam->buffers[i].timestamp;
1511 tv2 = &cam->buffers[found].timestamp;
1512 if(tv1->tv_sec < tv2->tv_sec ||
1513 (tv1->tv_sec == tv2->tv_sec &&
1514 tv1->tv_usec < tv2->tv_usec))
1515 found = i;
1516 }
1517 }
1518 }
1519 return found;
1520}
1521
1522/******************************************************************************
1523 *
1524 * ioctl_dqbuf
1525 *
1526 * V4L2 User is asking for a filled buffer.
1527 *
1528 *****************************************************************************/
1529
1530static int ioctl_dqbuf(void *arg,struct camera_data *cam, struct file *file)
1531{
1532 struct v4l2_buffer *buf = arg;
1533 int frame;
1534
1535 if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
1536 buf->memory != V4L2_MEMORY_MMAP)
1537 return -EINVAL;
1538
1539 frame = find_earliest_filled_buffer(cam);
1540
1541 if(frame < 0 && file->f_flags&O_NONBLOCK)
1542 return -EAGAIN;
1543
1544 if(frame < 0) {
1545 /* Wait for a frame to become available */
1546 struct framebuf *cb=cam->curbuff;
1547 up(&cam->busy_lock);
1548 wait_event_interruptible(cam->wq_stream,
1549 !cam->present ||
1550 (cb=cam->curbuff)->status == FRAME_READY);
1551 down(&cam->busy_lock);
1552 if (signal_pending(current))
1553 return -ERESTARTSYS;
1554 if(!cam->present)
1555 return -ENOTTY;
1556 frame = cb->num;
1557 }
1558
1559
1560 buf->index = frame;
1561 buf->bytesused = cam->buffers[buf->index].length;
1562 buf->flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_DONE;
1563 buf->field = V4L2_FIELD_NONE;
1564 buf->timestamp = cam->buffers[buf->index].timestamp;
1565 buf->sequence = cam->buffers[buf->index].seq;
1566 buf->m.offset = cam->buffers[buf->index].data - cam->frame_buffer;
1567 buf->length = cam->frame_size;
1568 buf->input = 0;
1569 buf->reserved = 0;
1570 memset(&buf->timecode, 0, sizeof(buf->timecode));
1571
1572 DBG("DQBUF #%d status:%d seq:%d length:%d\n", buf->index,
1573 cam->buffers[buf->index].status, buf->sequence, buf->bytesused);
1574
1575 return 0;
1576}
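
To show how the REQBUFS/QUERYBUF/QBUF/DQBUF handlers above fit together, here is a generic V4L2 mmap-streaming sketch of the sequence they serve (not part of this patch). It glosses over driver-specific details such as the record-priority requirement on mmap, and omits error checking; the device path and frame handling are assumptions for illustration only.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

static int example_grab_one_frame(const char *devname)
{
	int fd = open(devname, O_RDWR);
	struct v4l2_requestbuffers req;
	struct v4l2_buffer buf;
	void *frames[VIDEO_MAX_FRAME];
	int type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	unsigned int i;

	memset(&req, 0, sizeof(req));
	req.count = 4;	/* this driver ignores the count and returns num_buffers */
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_MMAP;
	ioctl(fd, VIDIOC_REQBUFS, &req);

	for (i = 0; i < req.count; i++) {
		memset(&buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.index = i;
		ioctl(fd, VIDIOC_QUERYBUF, &buf);
		frames[i] = mmap(NULL, buf.length, PROT_READ, MAP_SHARED,
				 fd, buf.m.offset);
	}

	ioctl(fd, VIDIOC_STREAMON, &type);

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_MMAP;
	ioctl(fd, VIDIOC_DQBUF, &buf);	/* blocks until a frame is FRAME_READY */
	/* frames[buf.index] now holds buf.bytesused bytes of JPEG data */
	ioctl(fd, VIDIOC_QBUF, &buf);	/* hand the buffer back to the driver */

	ioctl(fd, VIDIOC_STREAMOFF, &type);
	close(fd);
	return 0;
}
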
1577
1578/******************************************************************************
1579 *
1580 * cpia2_ioctl
1581 *
1582 *****************************************************************************/
1583static int cpia2_do_ioctl(struct inode *inode, struct file *file,
1584 unsigned int ioctl_nr, void *arg)
1585{
1586 struct video_device *dev = video_devdata(file);
1587 struct camera_data *cam = video_get_drvdata(dev);
1588 int retval = 0;
1589
1590 if (!cam)
1591 return -ENOTTY;
1592
1593 /* make this _really_ smp-safe */
1594 if (down_interruptible(&cam->busy_lock))
1595 return -ERESTARTSYS;
1596
1597 if (!cam->present) {
1598 up(&cam->busy_lock);
1599 return -ENODEV;
1600 }
1601
1602 /* Priority check */
1603 switch (ioctl_nr) {
1604 case VIDIOCSWIN:
1605 case VIDIOCMCAPTURE:
1606 case VIDIOC_S_FMT:
1607 {
1608 struct cpia2_fh *fh = file->private_data;
1609 retval = v4l2_prio_check(&cam->prio, &fh->prio);
1610 if(retval) {
1611 up(&cam->busy_lock);
1612 return retval;
1613 }
1614 break;
1615 }
1616 case VIDIOCGMBUF:
1617 case VIDIOCSYNC:
1618 {
1619 struct cpia2_fh *fh = file->private_data;
1620 if(fh->prio != V4L2_PRIORITY_RECORD) {
1621 up(&cam->busy_lock);
1622 return -EBUSY;
1623 }
1624 break;
1625 }
1626 default:
1627 break;
1628 }
1629
1630 switch (ioctl_nr) {
1631 case VIDIOCGCAP: /* query capabilities */
1632 retval = ioctl_cap_query(arg, cam);
1633 break;
1634
1635 case VIDIOCGCHAN: /* get video source - we are a camera, nothing else */
1636 retval = ioctl_get_channel(arg);
1637 break;
1638 case VIDIOCSCHAN: /* set video source - we are a camera, nothing else */
1639 retval = ioctl_set_channel(arg);
1640 break;
1641 case VIDIOCGPICT: /* image properties */
1642 memcpy(arg, &cam->vp, sizeof(struct video_picture));
1643 break;
1644 case VIDIOCSPICT:
1645 retval = ioctl_set_image_prop(arg, cam);
1646 break;
1647 case VIDIOCGWIN: /* get/set capture window */
1648 memcpy(arg, &cam->vw, sizeof(struct video_window));
1649 break;
1650 case VIDIOCSWIN:
1651 retval = ioctl_set_window_size(arg, cam, file->private_data);
1652 break;
1653 case VIDIOCGMBUF: /* mmap interface */
1654 retval = ioctl_get_mbuf(arg, cam);
1655 break;
1656 case VIDIOCMCAPTURE:
1657 retval = ioctl_mcapture(arg, cam, file->private_data);
1658 break;
1659 case VIDIOCSYNC:
1660 retval = ioctl_sync(arg, cam);
1661 break;
1662 /* pointless to implement overlay with this camera */
1663 case VIDIOCCAPTURE:
1664 case VIDIOCGFBUF:
1665 case VIDIOCSFBUF:
1666 case VIDIOCKEY:
1667 retval = -EINVAL;
1668 break;
1669
1670 /* tuner interface - we have none */
1671 case VIDIOCGTUNER:
1672 case VIDIOCSTUNER:
1673 case VIDIOCGFREQ:
1674 case VIDIOCSFREQ:
1675 retval = -EINVAL;
1676 break;
1677
1678 /* audio interface - we have none */
1679 case VIDIOCGAUDIO:
1680 case VIDIOCSAUDIO:
1681 retval = -EINVAL;
1682 break;
1683
1684 /* CPIA2 extension to Video4Linux API */
1685 case CPIA2_IOC_SET_GPIO:
1686 retval = ioctl_set_gpio(arg, cam);
1687 break;
1688 case VIDIOC_QUERYCAP:
1689 retval = ioctl_querycap(arg,cam);
1690 break;
1691
1692 case VIDIOC_ENUMINPUT:
1693 case VIDIOC_G_INPUT:
1694 case VIDIOC_S_INPUT:
1695 retval = ioctl_input(ioctl_nr, arg,cam);
1696 break;
1697
1698 case VIDIOC_ENUM_FMT:
1699 retval = ioctl_enum_fmt(arg,cam);
1700 break;
1701 case VIDIOC_TRY_FMT:
1702 retval = ioctl_try_fmt(arg,cam);
1703 break;
1704 case VIDIOC_G_FMT:
1705 retval = ioctl_get_fmt(arg,cam);
1706 break;
1707 case VIDIOC_S_FMT:
1708 retval = ioctl_set_fmt(arg,cam,file->private_data);
1709 break;
1710
1711 case VIDIOC_CROPCAP:
1712 retval = ioctl_cropcap(arg,cam);
1713 break;
1714 case VIDIOC_G_CROP:
1715 case VIDIOC_S_CROP:
1716 // TODO: I think cropping can be implemented - SJB
1717 retval = -EINVAL;
1718 break;
1719
1720 case VIDIOC_QUERYCTRL:
1721 retval = ioctl_queryctrl(arg,cam);
1722 break;
1723 case VIDIOC_QUERYMENU:
1724 retval = ioctl_querymenu(arg,cam);
1725 break;
1726 case VIDIOC_G_CTRL:
1727 retval = ioctl_g_ctrl(arg,cam);
1728 break;
1729 case VIDIOC_S_CTRL:
1730 retval = ioctl_s_ctrl(arg,cam);
1731 break;
1732
1733 case VIDIOC_G_JPEGCOMP:
1734 retval = ioctl_g_jpegcomp(arg,cam);
1735 break;
1736 case VIDIOC_S_JPEGCOMP:
1737 retval = ioctl_s_jpegcomp(arg,cam);
1738 break;
1739
1740 case VIDIOC_G_PRIORITY:
1741 {
1742 struct cpia2_fh *fh = file->private_data;
1743 *(enum v4l2_priority*)arg = fh->prio;
1744 break;
1745 }
1746 case VIDIOC_S_PRIORITY:
1747 {
1748 struct cpia2_fh *fh = file->private_data;
1749 enum v4l2_priority prio;
1750 prio = *(enum v4l2_priority*)arg;
1751 if(cam->streaming &&
1752 prio != fh->prio &&
1753 fh->prio == V4L2_PRIORITY_RECORD) {
1754 /* Can't drop record priority while streaming */
1755 retval = -EBUSY;
1756 } else if(prio == V4L2_PRIORITY_RECORD &&
1757 prio != fh->prio &&
1758 v4l2_prio_max(&cam->prio) == V4L2_PRIORITY_RECORD) {
1759 /* Only one program can record at a time */
1760 retval = -EBUSY;
1761 } else {
1762 retval = v4l2_prio_change(&cam->prio, &fh->prio, prio);
1763 }
1764 break;
1765 }
1766
1767 case VIDIOC_REQBUFS:
1768 retval = ioctl_reqbufs(arg,cam);
1769 break;
1770 case VIDIOC_QUERYBUF:
1771 retval = ioctl_querybuf(arg,cam);
1772 break;
1773 case VIDIOC_QBUF:
1774 retval = ioctl_qbuf(arg,cam);
1775 break;
1776 case VIDIOC_DQBUF:
1777 retval = ioctl_dqbuf(arg,cam,file);
1778 break;
1779 case VIDIOC_STREAMON:
1780 {
1781 int type;
1782 DBG("VIDIOC_STREAMON, streaming=%d\n", cam->streaming);
1783 type = *(int*)arg;
1784 if(!cam->mmapped || type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1785 retval = -EINVAL;
1786
1787 if(!cam->streaming) {
1788 retval = cpia2_usb_stream_start(cam,
1789 cam->params.camera_state.stream_mode);
1790 } else {
1791 retval = -EINVAL;
1792 }
1793
1794 break;
1795 }
1796 case VIDIOC_STREAMOFF:
1797 {
1798 int type;
1799 DBG("VIDIOC_STREAMOFF, streaming=%d\n", cam->streaming);
1800 type = *(int*)arg;
1801 if(!cam->mmapped || type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1802 retval = -EINVAL;
1803
1804 if(cam->streaming) {
1805 retval = cpia2_usb_stream_stop(cam);
1806 } else {
1807 retval = -EINVAL;
1808 }
1809
1810 break;
1811 }
1812
1813 case VIDIOC_ENUMOUTPUT:
1814 case VIDIOC_G_OUTPUT:
1815 case VIDIOC_S_OUTPUT:
1816 case VIDIOC_G_MODULATOR:
1817 case VIDIOC_S_MODULATOR:
1818
1819 case VIDIOC_ENUMAUDIO:
1820 case VIDIOC_G_AUDIO:
1821 case VIDIOC_S_AUDIO:
1822
1823 case VIDIOC_ENUMAUDOUT:
1824 case VIDIOC_G_AUDOUT:
1825 case VIDIOC_S_AUDOUT:
1826
1827 case VIDIOC_ENUMSTD:
1828 case VIDIOC_QUERYSTD:
1829 case VIDIOC_G_STD:
1830 case VIDIOC_S_STD:
1831
1832 case VIDIOC_G_TUNER:
1833 case VIDIOC_S_TUNER:
1834 case VIDIOC_G_FREQUENCY:
1835 case VIDIOC_S_FREQUENCY:
1836
1837 case VIDIOC_OVERLAY:
1838 case VIDIOC_G_FBUF:
1839 case VIDIOC_S_FBUF:
1840
1841 case VIDIOC_G_PARM:
1842 case VIDIOC_S_PARM:
1843 retval = -EINVAL;
1844 break;
1845 default:
1846 retval = -ENOIOCTLCMD;
1847 break;
1848 }
1849
1850 up(&cam->busy_lock);
1851 return retval;
1852}
1853
1854static int cpia2_ioctl(struct inode *inode, struct file *file,
1855 unsigned int ioctl_nr, unsigned long iarg)
1856{
1857 return video_usercopy(inode, file, ioctl_nr, iarg, cpia2_do_ioctl);
1858}
1859
1860/******************************************************************************
1861 *
1862 * cpia2_mmap
1863 *
1864 *****************************************************************************/
1865static int cpia2_mmap(struct file *file, struct vm_area_struct *area)
1866{
1867 int retval;
1868 struct video_device *dev = video_devdata(file);
1869 struct camera_data *cam = video_get_drvdata(dev);
1870
1871 /* Priority check */
1872 struct cpia2_fh *fh = file->private_data;
1873 if(fh->prio != V4L2_PRIORITY_RECORD) {
1874 return -EBUSY;
1875 }
1876
1877 retval = cpia2_remap_buffer(cam, area);
1878
1879 if(!retval)
1880 fh->mmapped = 1;
1881 return retval;
1882}
1883
1884/******************************************************************************
1885 *
1886 * reset_camera_struct_v4l
1887 *
1888 * Sets all values to the defaults
1889 *****************************************************************************/
1890static void reset_camera_struct_v4l(struct camera_data *cam)
1891{
1892 /***
1893 * Fill in the v4l structures. video_cap is filled in inside the VIDIOCGCAP
1894 * ioctl. Here, just do the window and picture structs.
1895 ***/
1896 cam->vp.palette = (u16) VIDEO_PALETTE_RGB24; /* Is this right? */
1897 cam->vp.brightness = (u16) cam->params.color_params.brightness * 256;
1898 cam->vp.colour = (u16) cam->params.color_params.saturation * 256;
1899 cam->vp.contrast = (u16) cam->params.color_params.contrast * 256;
1900
1901 cam->vw.x = 0;
1902 cam->vw.y = 0;
1903 cam->vw.width = cam->params.roi.width;
1904 cam->vw.height = cam->params.roi.height;
1905 cam->vw.flags = 0;
1906 cam->vw.clipcount = 0;
1907
1908 cam->frame_size = buffer_size;
1909 cam->num_frames = num_buffers;
1910
1911 /* FlickerModes */
1912 cam->params.flicker_control.flicker_mode_req = flicker_mode;
1913 cam->params.flicker_control.mains_frequency = flicker_freq;
1914
1915 /* streamMode */
1916 cam->params.camera_state.stream_mode = alternate;
1917
1918 cam->pixelformat = V4L2_PIX_FMT_JPEG;
1919 v4l2_prio_init(&cam->prio);
1920 return;
1921}
1922
1923/***
1924 * The v4l video device structure initialized for this device
1925 ***/
1926static struct file_operations fops_template = {
1927 .owner= THIS_MODULE,
1928 .open= cpia2_open,
1929 .release= cpia2_close,
1930 .read= cpia2_v4l_read,
1931 .poll= cpia2_v4l_poll,
1932 .ioctl= cpia2_ioctl,
1933 .llseek= no_llseek,
1934 .mmap= cpia2_mmap,
1935};
1936
1937static struct video_device cpia2_template = {
1938 /* I could not find any place for the old .initialize initializer?? */
1939 .owner= THIS_MODULE,
1940 .name= "CPiA2 Camera",
1941 .type= VID_TYPE_CAPTURE,
1942 .type2 = V4L2_CAP_VIDEO_CAPTURE |
1943 V4L2_CAP_STREAMING,
1944 .hardware= VID_HARDWARE_CPIA2,
1945 .minor= -1,
1946 .fops= &fops_template,
1947 .release= video_device_release,
1948};
1949
1950/******************************************************************************
1951 *
1952 * cpia2_register_camera
1953 *
1954 *****************************************************************************/
1955int cpia2_register_camera(struct camera_data *cam)
1956{
1957 cam->vdev = video_device_alloc();
1958 if(!cam->vdev)
1959 return -ENOMEM;
1960
1961 memcpy(cam->vdev, &cpia2_template, sizeof(cpia2_template));
1962 video_set_drvdata(cam->vdev, cam);
1963
1964 reset_camera_struct_v4l(cam);
1965
1966 /* register v4l device */
1967 if (video_register_device
1968 (cam->vdev, VFL_TYPE_GRABBER, video_nr) == -1) {
1969 ERR("video_register_device failed\n");
1970 video_device_release(cam->vdev);
1971 return -ENODEV;
1972 }
1973
1974 return 0;
1975}
1976
1977/******************************************************************************
1978 *
1979 * cpia2_unregister_camera
1980 *
1981 *****************************************************************************/
1982void cpia2_unregister_camera(struct camera_data *cam)
1983{
1984 if (!cam->open_count) {
1985 video_unregister_device(cam->vdev);
1986 } else {
1987 LOG("/dev/video%d removed while open, "
1988 "deferring video_unregister_device\n",
1989 cam->vdev->minor);
1990 }
1991}
1992
1993/******************************************************************************
1994 *
1995 * check_parameters
1996 *
1997 * Make sure that all user-supplied parameters are sensible
1998 *****************************************************************************/
1999static void __init check_parameters(void)
2000{
2001 if(buffer_size < PAGE_SIZE) {
2002 buffer_size = PAGE_SIZE;
2003 LOG("buffer_size too small, setting to %d\n", buffer_size);
2004 } else if(buffer_size > 1024*1024) {
2005 /* arbitrary upper limit */
2006 buffer_size = 1024*1024;
2007 LOG("buffer_size ridiculously large, setting to %d\n",
2008 buffer_size);
2009 } else {
2010 buffer_size += PAGE_SIZE-1;
2011 buffer_size &= ~(PAGE_SIZE-1);
2012 }
2013
2014 if(num_buffers < 1) {
2015 num_buffers = 1;
2016 LOG("num_buffers too small, setting to %d\n", num_buffers);
2017 } else if(num_buffers > VIDEO_MAX_FRAME) {
2018 num_buffers = VIDEO_MAX_FRAME;
2019 LOG("num_buffers too large, setting to %d\n", num_buffers);
2020 }
2021
2022 if(alternate < USBIF_ISO_1 || alternate > USBIF_ISO_6) {
2023 alternate = DEFAULT_ALT;
2024 LOG("alternate specified is invalid, using %d\n", alternate);
2025 }
2026
2027 if (flicker_mode != NEVER_FLICKER && flicker_mode != ANTI_FLICKER_ON) {
2028 flicker_mode = NEVER_FLICKER;
2029 LOG("Flicker mode specified is invalid, using %d\n",
2030 flicker_mode);
2031 }
2032
2033 if (flicker_freq != FLICKER_50 && flicker_freq != FLICKER_60) {
2034 flicker_freq = FLICKER_60;
2035 LOG("Flicker frequency specified is invalid, using %d\n",
2036 flicker_freq);
2037 }
2038
2039 if(video_nr < -1 || video_nr > 64) {
2040 video_nr = -1;
2041 LOG("invalid video_nr specified, must be -1 to 64\n");
2042 }
2043
2044 DBG("Using %d buffers, each %d bytes, alternate=%d\n",
2045 num_buffers, buffer_size, alternate);
2046}
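
As an aside, the buffer_size adjustment above is the usual power-of-two round-up idiom; a standalone illustration (not part of the driver) follows.

/* Round size up to a multiple of page (page must be a power of two).
 * Example: with 4096-byte pages, 68000 bytes rounds up to 69632 (17 pages). */
static unsigned long round_up_to_page(unsigned long size, unsigned long page)
{
	return (size + page - 1) & ~(page - 1);
}
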
2047
2048/************ Module Stuff ***************/
2049
2050
2051/******************************************************************************
2052 *
2053 * cpia2_init/module_init
2054 *
2055 *****************************************************************************/
2056static int __init cpia2_init(void)
2057{
2058 LOG("%s v%d.%d.%d\n",
2059 ABOUT, CPIA2_MAJ_VER, CPIA2_MIN_VER, CPIA2_PATCH_VER);
2060 check_parameters();
2061 cpia2_usb_init();
2062 return 0;
2063}
2064
2065
2066/******************************************************************************
2067 *
2068 * cpia2_exit/module_exit
2069 *
2070 *****************************************************************************/
2071static void __exit cpia2_exit(void)
2072{
2073 cpia2_usb_cleanup();
2074 schedule_timeout(2 * HZ);
2075}
2076
2077module_init(cpia2_init);
2078module_exit(cpia2_exit);
2079
diff --git a/drivers/media/video/cpia2/cpia2dev.h b/drivers/media/video/cpia2/cpia2dev.h
new file mode 100644
index 000000000000..d58097ce0d5e
--- /dev/null
+++ b/drivers/media/video/cpia2/cpia2dev.h
@@ -0,0 +1,50 @@
1/****************************************************************************
2 *
3 * Filename: cpia2dev.h
4 *
5 * Copyright 2001, STMicroelectronics, Inc.
6 *
7 * Contact: steve.miller@st.com
8 *
9 * Description:
10 * This file provides definitions for applications wanting to use the
11 * cpia2 driver beyond the generic v4l capabilities.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 *
27 ****************************************************************************/
28
29#ifndef CPIA2_DEV_HEADER
30#define CPIA2_DEV_HEADER
31
32#include <linux/videodev.h>
33
34/***
35 * The following defines are ioctl numbers based on video4linux private ioctls,
36 * which can range from 192 (BASE_VIDIOCPRIVATE) to 255. All of these take int
37 * args
38 */
39#define CPIA2_IOC_SET_GPIO _IOW('v', BASE_VIDIOCPRIVATE + 17, __u32)
40
41/* V4L2 driver specific controls */
42#define CPIA2_CID_TARGET_KB (V4L2_CID_PRIVATE_BASE+0)
43#define CPIA2_CID_GPIO (V4L2_CID_PRIVATE_BASE+1)
44#define CPIA2_CID_FLICKER_MODE (V4L2_CID_PRIVATE_BASE+2)
45#define CPIA2_CID_FRAMERATE (V4L2_CID_PRIVATE_BASE+3)
46#define CPIA2_CID_USB_ALT (V4L2_CID_PRIVATE_BASE+4)
47#define CPIA2_CID_LIGHTS (V4L2_CID_PRIVATE_BASE+5)
48#define CPIA2_CID_RESET_CAMERA (V4L2_CID_PRIVATE_BASE+6)
49
50#endif
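
For context, a hedged user-space sketch (not part of this header) of how an application might drive these private controls and the GPIO ioctl through the standard control interface; the control values shown are made-up examples.

#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include "cpia2dev.h"

static void example_use_cpia2_extras(int fd)
{
	struct v4l2_control ctrl;
	__u32 gpio_val = 0x01;	/* made-up value; meaning is board specific */

	ctrl.id = CPIA2_CID_TARGET_KB;	/* target compressed frame size */
	ctrl.value = 32;
	ioctl(fd, VIDIOC_S_CTRL, &ctrl);

	ctrl.id = CPIA2_CID_FRAMERATE;	/* index into the driver's frame-rate menu */
	ctrl.value = 0;
	ioctl(fd, VIDIOC_S_CTRL, &ctrl);

	ioctl(fd, CPIA2_IOC_SET_GPIO, &gpio_val);
}
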
diff --git a/drivers/media/video/cpia2/cpia2patch.h b/drivers/media/video/cpia2/cpia2patch.h
new file mode 100644
index 000000000000..7f085fbe76fb
--- /dev/null
+++ b/drivers/media/video/cpia2/cpia2patch.h
@@ -0,0 +1,233 @@
1/****************************************************************************
2 *
3 * Filename: cpia2patch.h
4 *
5 * Copyright 2001, STMicroelectronics, Inc.
6 *
7 * Contact: steve.miller@st.com
8 *
9 * Description:
10 * This file contains patch data for the CPiA2 (stv0672) VP4.
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 ****************************************************************************/
27
28#ifndef CPIA2_PATCH_HEADER
29#define CPIA2_PATCH_HEADER
30
31typedef struct {
32 unsigned char reg;
33 unsigned char count;
34 const unsigned char *data;
35} cpia2_patch;
36
37static const unsigned char start_address_hi[1] = {
38 0x01
39};
40
41static const unsigned char start_address_lo[1] = {
42 0xBC
43};
44
45static const unsigned char patch_block0[64] = {
46 0xE3, 0x02, 0xE3, 0x03, 0xE3, 0x04, 0xE3, 0x05,
47 0xE3, 0x06, 0xE3, 0x07, 0x93, 0x44, 0x56, 0xD4,
48 0x93, 0x4E, 0x56, 0x51, 0x93, 0x4E, 0x51, 0xD6,
49 0x93, 0x4E, 0x4F, 0x54, 0x93, 0x4E, 0x92, 0x4F,
50 0x92, 0xA4, 0x93, 0x05, 0x92, 0xF4, 0x93, 0x1B,
51 0x92, 0x92, 0x91, 0xE6, 0x92, 0x36, 0x92, 0x74,
52 0x92, 0x4A, 0x92, 0x8C, 0x92, 0x8E, 0xC8, 0xD0,
53 0x0B, 0x42, 0x02, 0xA0, 0xCA, 0x92, 0x09, 0x02
54};
55
56static const unsigned char patch_block1[64] = {
57 0xC9, 0x10, 0x0A, 0x0A, 0x0A, 0x81, 0xE3, 0xB8,
58 0xE3, 0xB0, 0xE3, 0xA8, 0xE3, 0xA0, 0xE3, 0x98,
59 0xE3, 0x90, 0xE1, 0x00, 0xCF, 0xD7, 0x0A, 0x12,
60 0xCC, 0x95, 0x08, 0xB2, 0x0A, 0x18, 0xE1, 0x00,
61 0x01, 0xEE, 0x0C, 0x08, 0x4A, 0x12, 0xC8, 0x18,
62 0xF0, 0x9A, 0xC0, 0x22, 0xF3, 0x1C, 0x4A, 0x13,
63 0xF3, 0x14, 0xC8, 0xA0, 0xF2, 0x14, 0xF2, 0x1C,
64 0xEB, 0x13, 0xD3, 0xA2, 0x63, 0x16, 0x48, 0x9E
65};
66
67static const unsigned char patch_block2[64] = {
68 0xF0, 0x18, 0xA4, 0x03, 0xF3, 0x93, 0xC0, 0x58,
69 0xF7, 0x13, 0x51, 0x9C, 0xE9, 0x20, 0xCF, 0xEF,
70 0x63, 0xF9, 0x92, 0x2E, 0xD3, 0x5F, 0x63, 0xFA,
71 0x92, 0x2E, 0xD3, 0x67, 0x63, 0xFB, 0x92, 0x2E,
72 0xD3, 0x6F, 0xE9, 0x1A, 0x63, 0x16, 0x48, 0xA7,
73 0xF0, 0x20, 0xA4, 0x06, 0xF3, 0x94, 0xC0, 0x27,
74 0xF7, 0x14, 0xF5, 0x13, 0x51, 0x9D, 0xF6, 0x13,
75 0x63, 0x18, 0xC4, 0x20, 0xCB, 0xEF, 0x63, 0xFC
76};
77
78static const unsigned char patch_block3[64] = {
79 0x92, 0x2E, 0xD3, 0x77, 0x63, 0xFD, 0x92, 0x2E,
80 0xD3, 0x7F, 0x63, 0xFE, 0x92, 0x2E, 0xD3, 0x87,
81 0x63, 0xFF, 0x92, 0x2E, 0xD3, 0x8F, 0x64, 0x38,
82 0x92, 0x2E, 0xD3, 0x97, 0x64, 0x39, 0x92, 0x2E,
83 0xD3, 0x9F, 0xE1, 0x00, 0xF5, 0x3A, 0xF4, 0x3B,
84 0xF7, 0xBF, 0xF2, 0xBC, 0xF2, 0x3D, 0xE1, 0x00,
85 0x80, 0x87, 0x90, 0x80, 0x51, 0xD5, 0x02, 0x22,
86 0x02, 0x32, 0x4B, 0xD3, 0xF7, 0x11, 0x0B, 0xDA
87};
88
89static const unsigned char patch_block4[64] = {
90 0xE1, 0x00, 0x0E, 0x02, 0x02, 0x40, 0x0D, 0xB5,
91 0xE3, 0x02, 0x48, 0x55, 0xE5, 0x12, 0xA4, 0x01,
92 0xE8, 0x1B, 0xE3, 0x90, 0xF0, 0x18, 0xA4, 0x01,
93 0xE8, 0xBF, 0x8D, 0xB8, 0x4B, 0xD1, 0x4B, 0xD8,
94 0x0B, 0xCB, 0x0B, 0xC2, 0xE1, 0x00, 0xE3, 0x02,
95 0xE3, 0x03, 0x52, 0xD3, 0x60, 0x59, 0xE6, 0x93,
96 0x0D, 0x22, 0x52, 0xD4, 0xE6, 0x93, 0x0D, 0x2A,
97 0xE3, 0x98, 0xE3, 0x90, 0xE1, 0x00, 0x02, 0x5D
98};
99
100static const unsigned char patch_block5[64] = {
101 0x02, 0x63, 0xE3, 0x02, 0xC8, 0x12, 0x02, 0xCA,
102 0xC8, 0x52, 0x02, 0xC2, 0x82, 0x68, 0xE3, 0x02,
103 0xC8, 0x14, 0x02, 0xCA, 0xC8, 0x90, 0x02, 0xC2,
104 0x0A, 0xD0, 0xC9, 0x93, 0x0A, 0xDA, 0xCC, 0xD2,
105 0x0A, 0xE2, 0x63, 0x12, 0x02, 0xDA, 0x0A, 0x98,
106 0x0A, 0xA0, 0x0A, 0xA8, 0xE3, 0x90, 0xE1, 0x00,
107 0xE3, 0x02, 0x0A, 0xD0, 0xC9, 0x93, 0x0A, 0xDA,
108 0xCC, 0xD2, 0x0A, 0xE2, 0x63, 0x12, 0x02, 0xDA
109};
110
111static const unsigned char patch_block6[64] = {
112 0x0A, 0x98, 0x0A, 0xA0, 0x0A, 0xA8, 0x49, 0x91,
113 0xE5, 0x6A, 0xA4, 0x04, 0xC8, 0x12, 0x02, 0xCA,
114 0xC8, 0x52, 0x82, 0x89, 0xC8, 0x14, 0x02, 0xCA,
115 0xC8, 0x90, 0x02, 0xC2, 0xE3, 0x90, 0xE1, 0x00,
116 0x08, 0x60, 0xE1, 0x00, 0x48, 0x53, 0xE8, 0x97,
117 0x08, 0x5A, 0xE1, 0x00, 0xE3, 0x02, 0xE3, 0x03,
118 0x54, 0xD3, 0x60, 0x59, 0xE6, 0x93, 0x0D, 0x52,
119 0xE3, 0x98, 0xE3, 0x90, 0xE1, 0x00, 0x02, 0x9C
120};
121
122static const unsigned char patch_block7[64] = {
123 0xE3, 0x02, 0x55, 0x13, 0x93, 0x17, 0x55, 0x13,
124 0x93, 0x17, 0xE3, 0x90, 0xE1, 0x00, 0x75, 0x30,
125 0xE3, 0x02, 0xE3, 0x03, 0x55, 0x55, 0x60, 0x59,
126 0xE6, 0x93, 0x0D, 0xB2, 0xE3, 0x98, 0xE3, 0x90,
127 0xE1, 0x00, 0x02, 0xAE, 0xE7, 0x92, 0xE9, 0x18,
128 0xEA, 0x9A, 0xE8, 0x98, 0xE8, 0x10, 0xE8, 0x11,
129 0xE8, 0x51, 0xD2, 0xDA, 0xD2, 0xF3, 0xE8, 0x13,
130 0xD2, 0xFA, 0xE8, 0x50, 0xD2, 0xEA, 0xE8, 0xD0
131};
132
133static const unsigned char patch_block8[64] = {
134 0xE8, 0xD1, 0xD3, 0x0A, 0x03, 0x09, 0x48, 0x23,
135 0xE5, 0x2C, 0xA0, 0x03, 0x48, 0x24, 0xEA, 0x1C,
136 0x03, 0x08, 0xD2, 0xE3, 0xD3, 0x03, 0xD3, 0x13,
137 0xE1, 0x00, 0x02, 0xCB, 0x05, 0x93, 0x57, 0x93,
138 0xF0, 0x9A, 0xAC, 0x0B, 0xE3, 0x07, 0x92, 0xEA,
139 0xE2, 0x9F, 0xE5, 0x06, 0xE3, 0xB0, 0xA0, 0x02,
140 0xEB, 0x1E, 0x82, 0xD7, 0xEA, 0x1E, 0xE2, 0x3B,
141 0x85, 0x9B, 0xE9, 0x1E, 0xC8, 0x90, 0x85, 0x94
142};
143
144static const unsigned char patch_block9[64] = {
145 0x02, 0xDE, 0x05, 0x80, 0x57, 0x93, 0xF0, 0xBA,
146 0xAC, 0x06, 0x92, 0xEA, 0xE2, 0xBF, 0xE5, 0x06,
147 0xA0, 0x01, 0xEB, 0xBF, 0x85, 0x88, 0xE9, 0x3E,
148 0xC8, 0x90, 0x85, 0x81, 0xE9, 0x3E, 0xF0, 0xBA,
149 0xF3, 0x39, 0xF0, 0x3A, 0x60, 0x17, 0xF0, 0x3A,
150 0xC0, 0x90, 0xF0, 0xBA, 0xE1, 0x00, 0x00, 0x3F,
151 0xE3, 0x02, 0xE3, 0x03, 0x58, 0x10, 0x60, 0x59,
152 0xE6, 0x93, 0x0D, 0xA2, 0x58, 0x12, 0xE6, 0x93
153};
154
155static const unsigned char patch_block10[64] = {
156 0x0D, 0xAA, 0xE3, 0x98, 0xE3, 0x90, 0xE1, 0x00,
157 0x03, 0x01, 0xE1, 0x00, 0x03, 0x03, 0x9B, 0x7D,
158 0x8B, 0x8B, 0xE3, 0x02, 0xE3, 0x03, 0x58, 0x56,
159 0x60, 0x59, 0xE6, 0x93, 0x0D, 0xBA, 0xE3, 0x98,
160 0xE3, 0x90, 0xE1, 0x00, 0x03, 0x0F, 0x93, 0x11,
161 0xE1, 0x00, 0xE3, 0x02, 0x4A, 0x11, 0x0B, 0x42,
162 0x91, 0xAF, 0xE3, 0x90, 0xE1, 0x00, 0xF2, 0x91,
163 0xF0, 0x91, 0xA3, 0xFE, 0xE1, 0x00, 0x60, 0x92
164};
165
166static const unsigned char patch_block11[64] = {
167 0xC0, 0x5F, 0xF0, 0x13, 0xF0, 0x13, 0x59, 0x5B,
168 0xE2, 0x13, 0xF0, 0x11, 0x5A, 0x19, 0xE2, 0x13,
169 0xE1, 0x00, 0x00, 0x00, 0x03, 0x27, 0x68, 0x61,
170 0x76, 0x61, 0x6E, 0x61, 0x00, 0x06, 0x03, 0x2C,
171 0xE3, 0x02, 0xE3, 0x03, 0xE9, 0x38, 0x59, 0x15,
172 0x59, 0x5A, 0xF2, 0x9A, 0xBC, 0x0B, 0xA4, 0x0A,
173 0x59, 0x1E, 0xF3, 0x11, 0xF0, 0x1A, 0xE2, 0xBB,
174 0x59, 0x15, 0xF0, 0x11, 0x19, 0x2A, 0xE5, 0x02
175};
176
177static const unsigned char patch_block12[54] = {
178 0xA4, 0x01, 0xEB, 0xBF, 0xE3, 0x98, 0xE3, 0x90,
179 0xE1, 0x00, 0x03, 0x42, 0x19, 0x28, 0xE1, 0x00,
180 0xE9, 0x30, 0x60, 0x79, 0xE1, 0x00, 0xE3, 0x03,
181 0xE3, 0x07, 0x60, 0x79, 0x93, 0x4E, 0xE3, 0xB8,
182 0xE3, 0x98, 0xE1, 0x00, 0xE9, 0x1A, 0xF0, 0x1F,
183 0xE2, 0x33, 0xF0, 0x91, 0xE2, 0x92, 0xE0, 0x32,
184 0xF0, 0x31, 0xE1, 0x00, 0x00, 0x00
185};
186
187static const unsigned char do_call[1] = {
188 0x01
189};
190
191
192#define PATCH_DATA_SIZE 18
193
194static const cpia2_patch patch_data[PATCH_DATA_SIZE] = {
195 {0x0A, sizeof(start_address_hi), start_address_hi}
196 , // 0
197 {0x0B, sizeof(start_address_lo), start_address_lo}
198 , // 1
199 {0x0C, sizeof(patch_block0), patch_block0}
200 , // 2
201 {0x0C, sizeof(patch_block1), patch_block1}
202 , // 3
203 {0x0C, sizeof(patch_block2), patch_block2}
204 , // 4
205 {0x0C, sizeof(patch_block3), patch_block3}
206 , // 5
207 {0x0C, sizeof(patch_block4), patch_block4}
208 , // 6
209 {0x0C, sizeof(patch_block5), patch_block5}
210 , // 7
211 {0x0C, sizeof(patch_block6), patch_block6}
212 , // 8
213 {0x0C, sizeof(patch_block7), patch_block7}
214 , // 9
215 {0x0C, sizeof(patch_block8), patch_block8}
216 , // 10
217 {0x0C, sizeof(patch_block9), patch_block9}
218 , //11
219 {0x0C, sizeof(patch_block10), patch_block10}
220 , // 12
221 {0x0C, sizeof(patch_block11), patch_block11}
222 , // 13
223 {0x0C, sizeof(patch_block12), patch_block12}
224 , // 14
225 {0x0A, sizeof(start_address_hi), start_address_hi}
226 , // 15
227 {0x0B, sizeof(start_address_lo), start_address_lo}
228 , // 16
229 {0x0D, sizeof(do_call), do_call} //17
230};
231
232
233#endif
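
To make the table's intended use concrete, here is a hedged sketch (not part of this header) of a loop that walks patch_data[]; the register-write callback is an assumption standing in for the driver's real command path.

typedef int (*cpia2_reg_write_fn)(void *ctx, unsigned char reg,
				  const unsigned char *data,
				  unsigned char count);

/* Send every patch block, in order, to its target register. */
static int example_upload_patch(void *ctx, cpia2_reg_write_fn write_regs)
{
	int i;

	for (i = 0; i < PATCH_DATA_SIZE; i++) {
		int err = write_regs(ctx, patch_data[i].reg,
				     patch_data[i].data,
				     patch_data[i].count);
		if (err)
			return err;	/* stop on the first failed block */
	}
	return 0;
}
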
diff --git a/drivers/media/video/cx25840/Kconfig b/drivers/media/video/cx25840/Kconfig
new file mode 100644
index 000000000000..854264e42ec0
--- /dev/null
+++ b/drivers/media/video/cx25840/Kconfig
@@ -0,0 +1,9 @@
1config VIDEO_CX25840
2 tristate "Conexant CX2584x audio/video decoders"
3 depends on VIDEO_DEV && I2C && EXPERIMENTAL
4 select FW_LOADER
5 ---help---
6 Support for the Conexant CX2584x audio/video decoders.
7
8 To compile this driver as a module, choose M here: the
9 module will be called cx25840
diff --git a/drivers/media/video/cx25840/Makefile b/drivers/media/video/cx25840/Makefile
index 543ebacdc9d7..32a896c23d1e 100644
--- a/drivers/media/video/cx25840/Makefile
+++ b/drivers/media/video/cx25840/Makefile
@@ -1,6 +1,6 @@
1cx25840-objs := cx25840-core.o cx25840-audio.o cx25840-firmware.o \ 1cx25840-objs := cx25840-core.o cx25840-audio.o cx25840-firmware.o \
2 cx25840-vbi.o 2 cx25840-vbi.o
3 3
4obj-$(CONFIG_VIDEO_DECODER) += cx25840.o 4obj-$(CONFIG_VIDEO_CX25840) += cx25840.o
5 5
6EXTRA_CFLAGS += -I$(src)/.. 6EXTRA_CFLAGS += -I$(src)/..
diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c
index 5588b9a5c430..8a257978056f 100644
--- a/drivers/media/video/cx25840/cx25840-core.c
+++ b/drivers/media/video/cx25840/cx25840-core.c
@@ -743,6 +743,7 @@ static int cx25840_command(struct i2c_client *client, unsigned int cmd,
743 743
744 memset(input, 0, sizeof(*input)); 744 memset(input, 0, sizeof(*input));
745 input->index = state->aud_input; 745 input->index = state->aud_input;
746 input->capability = V4L2_AUDCAP_STEREO;
746 break; 747 break;
747 } 748 }
748 749
@@ -753,7 +754,6 @@ static int cx25840_command(struct i2c_client *client, unsigned int cmd,
753 case VIDIOC_G_TUNER: 754 case VIDIOC_G_TUNER:
754 { 755 {
755 u8 mode = cx25840_read(client, 0x804); 756 u8 mode = cx25840_read(client, 0x804);
756 u8 pref = cx25840_read(client, 0x809) & 0xf;
757 u8 vpres = cx25840_read(client, 0x80a) & 0x10; 757 u8 vpres = cx25840_read(client, 0x80a) & 0x10;
758 int val = 0; 758 int val = 0;
759 759
@@ -773,44 +773,49 @@ static int cx25840_command(struct i2c_client *client, unsigned int cmd,
773 val |= V4L2_TUNER_SUB_MONO; 773 val |= V4L2_TUNER_SUB_MONO;
774 774
775 if (mode == 2 || mode == 4) 775 if (mode == 2 || mode == 4)
776 val |= V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; 776 val = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
777 777
778 if (mode & 0x10) 778 if (mode & 0x10)
779 val |= V4L2_TUNER_SUB_SAP; 779 val |= V4L2_TUNER_SUB_SAP;
780 780
781 vt->rxsubchans = val; 781 vt->rxsubchans = val;
782 782 vt->audmode = state->audmode;
783 switch (pref) {
784 case 0:
785 vt->audmode = V4L2_TUNER_MODE_MONO;
786 break;
787 case 1:
788 case 2:
789 vt->audmode = V4L2_TUNER_MODE_LANG2;
790 break;
791 case 4:
792 default:
793 vt->audmode = V4L2_TUNER_MODE_STEREO;
794 }
795 break; 783 break;
796 } 784 }
797 785
798 case VIDIOC_S_TUNER: 786 case VIDIOC_S_TUNER:
787 if (state->radio)
788 break;
789
799 switch (vt->audmode) { 790 switch (vt->audmode) {
800 case V4L2_TUNER_MODE_MONO: 791 case V4L2_TUNER_MODE_MONO:
801 case V4L2_TUNER_MODE_LANG1: 792 /* mono -> mono
802 /* Force PREF_MODE to MONO */ 793 stereo -> mono
794 bilingual -> lang1 */
803 cx25840_and_or(client, 0x809, ~0xf, 0x00); 795 cx25840_and_or(client, 0x809, ~0xf, 0x00);
804 break; 796 break;
805 case V4L2_TUNER_MODE_STEREO: 797 case V4L2_TUNER_MODE_LANG1:
806 /* Force PREF_MODE to STEREO */ 798 /* mono -> mono
799 stereo -> stereo
800 bilingual -> lang1 */
807 cx25840_and_or(client, 0x809, ~0xf, 0x04); 801 cx25840_and_or(client, 0x809, ~0xf, 0x04);
808 break; 802 break;
803 case V4L2_TUNER_MODE_STEREO:
804 /* mono -> mono
805 stereo -> stereo
806 bilingual -> lang1/lang2 */
807 cx25840_and_or(client, 0x809, ~0xf, 0x07);
808 break;
809 case V4L2_TUNER_MODE_LANG2: 809 case V4L2_TUNER_MODE_LANG2:
810 /* Force PREF_MODE to LANG2 */ 810 /* mono -> mono
811 stereo ->stereo
812 bilingual -> lang2 */
811 cx25840_and_or(client, 0x809, ~0xf, 0x01); 813 cx25840_and_or(client, 0x809, ~0xf, 0x01);
812 break; 814 break;
815 default:
816 return -EINVAL;
813 } 817 }
818 state->audmode = vt->audmode;
814 break; 819 break;
815 820
816 case VIDIOC_G_FMT: 821 case VIDIOC_G_FMT:
@@ -891,6 +896,7 @@ static int cx25840_detect_client(struct i2c_adapter *adapter, int address,
891 state->aud_input = CX25840_AUDIO8; 896 state->aud_input = CX25840_AUDIO8;
892 state->audclk_freq = 48000; 897 state->audclk_freq = 48000;
893 state->pvr150_workaround = 0; 898 state->pvr150_workaround = 0;
899 state->audmode = V4L2_TUNER_MODE_LANG1;
894 900
895 cx25840_initialize(client, 1); 901 cx25840_initialize(client, 1);
896 902
diff --git a/drivers/media/video/cx25840/cx25840-vbi.c b/drivers/media/video/cx25840/cx25840-vbi.c
index 04d879da7d63..e96fd1f1d6dc 100644
--- a/drivers/media/video/cx25840/cx25840-vbi.c
+++ b/drivers/media/video/cx25840/cx25840-vbi.c
@@ -151,7 +151,7 @@ int cx25840_vbi(struct i2c_client *client, unsigned int cmd, void *arg)
151 case VIDIOC_G_FMT: 151 case VIDIOC_G_FMT:
152 { 152 {
153 static u16 lcr2vbi[] = { 153 static u16 lcr2vbi[] = {
154 0, V4L2_SLICED_TELETEXT_B, 0, /* 1 */ 154 0, V4L2_SLICED_TELETEXT_PAL_B, 0, /* 1 */
155 0, V4L2_SLICED_WSS_625, 0, /* 4 */ 155 0, V4L2_SLICED_WSS_625, 0, /* 4 */
156 V4L2_SLICED_CAPTION_525, /* 6 */ 156 V4L2_SLICED_CAPTION_525, /* 6 */
157 0, 0, V4L2_SLICED_VPS, 0, 0, /* 9 */ 157 0, 0, V4L2_SLICED_VPS, 0, 0, /* 9 */
@@ -231,7 +231,7 @@ int cx25840_vbi(struct i2c_client *client, unsigned int cmd, void *arg)
231 for (i = 7; i <= 23; i++) { 231 for (i = 7; i <= 23; i++) {
232 for (x = 0; x <= 1; x++) { 232 for (x = 0; x <= 1; x++) {
233 switch (svbi->service_lines[1-x][i]) { 233 switch (svbi->service_lines[1-x][i]) {
234 case V4L2_SLICED_TELETEXT_B: 234 case V4L2_SLICED_TELETEXT_PAL_B:
235 lcr[i] |= 1 << (4 * x); 235 lcr[i] |= 1 << (4 * x);
236 break; 236 break;
237 case V4L2_SLICED_WSS_625: 237 case V4L2_SLICED_WSS_625:
@@ -282,7 +282,7 @@ int cx25840_vbi(struct i2c_client *client, unsigned int cmd, void *arg)
282 282
283 switch (id2) { 283 switch (id2) {
284 case 1: 284 case 1:
285 id2 = V4L2_SLICED_TELETEXT_B; 285 id2 = V4L2_SLICED_TELETEXT_PAL_B;
286 break; 286 break;
287 case 4: 287 case 4:
288 id2 = V4L2_SLICED_WSS_625; 288 id2 = V4L2_SLICED_WSS_625;
diff --git a/drivers/media/video/cx25840/cx25840.h b/drivers/media/video/cx25840/cx25840.h
index fd22f30dcc1b..dd70664d1dd9 100644
--- a/drivers/media/video/cx25840/cx25840.h
+++ b/drivers/media/video/cx25840/cx25840.h
@@ -78,6 +78,7 @@ struct cx25840_state {
78 enum cx25840_video_input vid_input; 78 enum cx25840_video_input vid_input;
79 enum cx25840_audio_input aud_input; 79 enum cx25840_audio_input aud_input;
80 u32 audclk_freq; 80 u32 audclk_freq;
81 int audmode;
81}; 82};
82 83
83/* ----------------------------------------------------------------------- */ 84/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/video/cx88/Kconfig b/drivers/media/video/cx88/Kconfig
index 87d79df05336..e140996e6ee4 100644
--- a/drivers/media/video/cx88/Kconfig
+++ b/drivers/media/video/cx88/Kconfig
@@ -50,6 +50,7 @@ config VIDEO_CX88_DVB_ALL_FRONTENDS
50 depends on VIDEO_CX88_DVB 50 depends on VIDEO_CX88_DVB
51 select DVB_MT352 51 select DVB_MT352
52 select VIDEO_CX88_VP3054 52 select VIDEO_CX88_VP3054
53 select DVB_ZL10353
53 select DVB_OR51132 54 select DVB_OR51132
54 select DVB_CX22702 55 select DVB_CX22702
55 select DVB_LGDT330X 56 select DVB_LGDT330X
@@ -81,6 +82,16 @@ config VIDEO_CX88_VP3054
81 which also require support for the VP-3054 82 which also require support for the VP-3054
82 Secondary I2C bus, such at DNTV Live! DVB-T Pro. 83 Secondary I2C bus, such at DNTV Live! DVB-T Pro.
83 84
85config VIDEO_CX88_DVB_ZL10353
86 bool "Zarlink ZL10353 DVB-T Support"
87 default y
88 depends on VIDEO_CX88_DVB && !VIDEO_CX88_DVB_ALL_FRONTENDS
89 select DVB_ZL10353
90 ---help---
91 This adds DVB-T support for cards based on the
92 Connexant 2388x chip and the ZL10353 demodulator,
93 successor to the Zarlink MT352.
94
84config VIDEO_CX88_DVB_OR51132 95config VIDEO_CX88_DVB_OR51132
85 bool "OR51132 ATSC Support" 96 bool "OR51132 ATSC Support"
86 default y 97 default y
diff --git a/drivers/media/video/cx88/Makefile b/drivers/media/video/cx88/Makefile
index 2b902784facc..6482b9aa6a1f 100644
--- a/drivers/media/video/cx88/Makefile
+++ b/drivers/media/video/cx88/Makefile
@@ -17,6 +17,7 @@ extra-cflags-$(CONFIG_DVB_CX22702) += -DHAVE_CX22702=1
17extra-cflags-$(CONFIG_DVB_OR51132) += -DHAVE_OR51132=1 17extra-cflags-$(CONFIG_DVB_OR51132) += -DHAVE_OR51132=1
18extra-cflags-$(CONFIG_DVB_LGDT330X) += -DHAVE_LGDT330X=1 18extra-cflags-$(CONFIG_DVB_LGDT330X) += -DHAVE_LGDT330X=1
19extra-cflags-$(CONFIG_DVB_MT352) += -DHAVE_MT352=1 19extra-cflags-$(CONFIG_DVB_MT352) += -DHAVE_MT352=1
20extra-cflags-$(CONFIG_DVB_ZL10353) += -DHAVE_ZL10353=1
20extra-cflags-$(CONFIG_DVB_NXT200X) += -DHAVE_NXT200X=1 21extra-cflags-$(CONFIG_DVB_NXT200X) += -DHAVE_NXT200X=1
21extra-cflags-$(CONFIG_DVB_CX24123) += -DHAVE_CX24123=1 22extra-cflags-$(CONFIG_DVB_CX24123) += -DHAVE_CX24123=1
22extra-cflags-$(CONFIG_VIDEO_CX88_VP3054)+= -DHAVE_VP3054_I2C=1 23extra-cflags-$(CONFIG_VIDEO_CX88_VP3054)+= -DHAVE_VP3054_I2C=1
diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
index 2acccd6d49bc..bffef1decc8b 100644
--- a/drivers/media/video/cx88/cx88-alsa.c
+++ b/drivers/media/video/cx88/cx88-alsa.c
@@ -672,6 +672,11 @@ static int __devinit snd_cx88_create(snd_card_t *card, struct pci_dev *pci,
672 chip = (snd_cx88_card_t *) card->private_data; 672 chip = (snd_cx88_card_t *) card->private_data;
673 673
674 core = cx88_core_get(pci); 674 core = cx88_core_get(pci);
675 if (NULL == core) {
676 err = -EINVAL;
677 kfree (chip);
678 return err;
679 }
675 680
676 if (!pci_dma_supported(pci,0xffffffff)) { 681 if (!pci_dma_supported(pci,0xffffffff)) {
677 dprintk(0, "%s/1: Oops: no 32bit PCI DMA ???\n",core->name); 682 dprintk(0, "%s/1: Oops: no 32bit PCI DMA ???\n",core->name);
@@ -688,11 +693,6 @@ static int __devinit snd_cx88_create(snd_card_t *card, struct pci_dev *pci,
688 spin_lock_init(&chip->reg_lock); 693 spin_lock_init(&chip->reg_lock);
689 694
690 cx88_reset(core); 695 cx88_reset(core);
691 if (NULL == core) {
692 err = -EINVAL;
693 kfree (chip);
694 return err;
695 }
696 chip->core = core; 696 chip->core = core;
697 697
698 /* get irq */ 698 /* get irq */
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index 1bc999247fdc..c7042cf41231 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -184,17 +184,18 @@ struct cx88_board cx88_boards[] = {
184 .input = {{ 184 .input = {{
185 .type = CX88_VMUX_TELEVISION, 185 .type = CX88_VMUX_TELEVISION,
186 .vmux = 0, 186 .vmux = 0,
187 .gpio1 = 0x309f, 187 .gpio1 = 0xe09f,
188 },{ 188 },{
189 .type = CX88_VMUX_COMPOSITE1, 189 .type = CX88_VMUX_COMPOSITE1,
190 .vmux = 1, 190 .vmux = 1,
191 .gpio1 = 0x305f, 191 .gpio1 = 0xe05f,
192 },{ 192 },{
193 .type = CX88_VMUX_SVIDEO, 193 .type = CX88_VMUX_SVIDEO,
194 .vmux = 2, 194 .vmux = 2,
195 .gpio1 = 0x305f, 195 .gpio1 = 0xe05f,
196 }}, 196 }},
197 .radio = { 197 .radio = {
198 .gpio1 = 0xe0df,
198 .type = CX88_RADIO, 199 .type = CX88_RADIO,
199 }, 200 },
200 }, 201 },
@@ -322,19 +323,19 @@ struct cx88_board cx88_boards[] = {
322 .input = {{ 323 .input = {{
323 .type = CX88_VMUX_TELEVISION, 324 .type = CX88_VMUX_TELEVISION,
324 .vmux = 0, 325 .vmux = 0,
325 .gpio0 = 0xff00, 326 .gpio0 = 0xbff0,
326 },{ 327 },{
327 .type = CX88_VMUX_COMPOSITE1, 328 .type = CX88_VMUX_COMPOSITE1,
328 .vmux = 1, 329 .vmux = 1,
329 .gpio0 = 0xff03, 330 .gpio0 = 0xbff3,
330 },{ 331 },{
331 .type = CX88_VMUX_SVIDEO, 332 .type = CX88_VMUX_SVIDEO,
332 .vmux = 2, 333 .vmux = 2,
333 .gpio0 = 0xff03, 334 .gpio0 = 0xbff3,
334 }}, 335 }},
335 .radio = { 336 .radio = {
336 .type = CX88_RADIO, 337 .type = CX88_RADIO,
337 .gpio0 = 0xff00, 338 .gpio0 = 0xbff0,
338 }, 339 },
339 }, 340 },
340 [CX88_BOARD_ASUS_PVR_416] = { 341 [CX88_BOARD_ASUS_PVR_416] = {
@@ -1048,6 +1049,50 @@ struct cx88_board cx88_boards[] = {
1048 }}, 1049 }},
1049 .dvb = 1, 1050 .dvb = 1,
1050 }, 1051 },
1052 [CX88_BOARD_KWORLD_HARDWARE_MPEG_TV_XPERT] = {
1053 /* FIXME: Standard video using the cx88 broadcast decoder is
1054 * working, but blackbird isn't working yet, audio is only
1055 * working correctly for television mode. S-Video and Composite
1056 * are working for video-only, so I have them disabled for now.
1057 */
1058 .name = "KWorld HardwareMpegTV XPert",
1059 .tuner_type = TUNER_PHILIPS_TDA8290,
1060 .radio_type = UNSET,
1061 .tuner_addr = ADDR_UNSET,
1062 .radio_addr = ADDR_UNSET,
1063 .input = {{
1064 .type = CX88_VMUX_TELEVISION,
1065 .vmux = 0,
1066 .gpio0 = 0x3de2,
1067 .gpio2 = 0x00ff,
1068 }},
1069 .radio = {
1070 .type = CX88_RADIO,
1071 .gpio0 = 0x3de6,
1072 .gpio2 = 0x00ff,
1073 },
1074 },
1075 [CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_HYBRID] = {
1076 .name = "DViCO FusionHDTV DVB-T Hybrid",
1077 .tuner_type = TUNER_THOMSON_FE6600,
1078 .radio_type = UNSET,
1079 .tuner_addr = ADDR_UNSET,
1080 .radio_addr = ADDR_UNSET,
1081 .input = {{
1082 .type = CX88_VMUX_TELEVISION,
1083 .vmux = 0,
1084 .gpio0 = 0x0000a75f,
1085 },{
1086 .type = CX88_VMUX_COMPOSITE1,
1087 .vmux = 1,
1088 .gpio0 = 0x0000a75b,
1089 },{
1090 .type = CX88_VMUX_SVIDEO,
1091 .vmux = 2,
1092 .gpio0 = 0x0000a75b,
1093 }},
1094 .dvb = 1,
1095 },
1051 1096
1052}; 1097};
1053const unsigned int cx88_bcount = ARRAY_SIZE(cx88_boards); 1098const unsigned int cx88_bcount = ARRAY_SIZE(cx88_boards);
@@ -1254,6 +1299,18 @@ struct cx88_subid cx88_subids[] = {
1254 .subdevice = 0xdb11, 1299 .subdevice = 0xdb11,
1255 .card = CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PLUS, 1300 .card = CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PLUS,
1256 /* Re-branded DViCO: UltraView DVB-T Plus */ 1301 /* Re-branded DViCO: UltraView DVB-T Plus */
1302 },{
1303 .subvendor = 0x17de,
1304 .subdevice = 0x0840,
1305 .card = CX88_BOARD_KWORLD_HARDWARE_MPEG_TV_XPERT,
1306 },{
1307 .subvendor = 0x18ac,
1308 .subdevice = 0xdb40,
1309 .card = CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_HYBRID,
1310 },{
1311 .subvendor = 0x18ac,
1312 .subdevice = 0xdb44,
1313 .card = CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_HYBRID,
1257 }, 1314 },
1258}; 1315};
1259const unsigned int cx88_idcount = ARRAY_SIZE(cx88_subids); 1316const unsigned int cx88_idcount = ARRAY_SIZE(cx88_subids);
@@ -1373,6 +1430,40 @@ static void gdi_eeprom(struct cx88_core *core, u8 *eeprom_data)
1373} 1430}
1374 1431
1375/* ----------------------------------------------------------------------- */ 1432/* ----------------------------------------------------------------------- */
1433/* some DViCO specific stuff */
1434
1435static void dvico_fusionhdtv_hybrid_init(struct cx88_core *core)
1436{
1437 struct i2c_msg msg = { .addr = 0x45, .flags = 0 };
1438 int i, err;
1439 static u8 init_bufs[13][5] = {
1440 { 0x10, 0x00, 0x20, 0x01, 0x03 },
1441 { 0x10, 0x10, 0x01, 0x00, 0x21 },
1442 { 0x10, 0x10, 0x10, 0x00, 0xCA },
1443 { 0x10, 0x10, 0x12, 0x00, 0x08 },
1444 { 0x10, 0x10, 0x13, 0x00, 0x0A },
1445 { 0x10, 0x10, 0x16, 0x01, 0xC0 },
1446 { 0x10, 0x10, 0x22, 0x01, 0x3D },
1447 { 0x10, 0x10, 0x73, 0x01, 0x2E },
1448 { 0x10, 0x10, 0x72, 0x00, 0xC5 },
1449 { 0x10, 0x10, 0x71, 0x01, 0x97 },
1450 { 0x10, 0x10, 0x70, 0x00, 0x0F },
1451 { 0x10, 0x10, 0xB0, 0x00, 0x01 },
1452 { 0x03, 0x0C },
1453 };
1454
1455 for (i = 0; i < 13; i++) {
1456 msg.buf = init_bufs[i];
1457 msg.len = (i != 12 ? 5 : 2);
1458 err = i2c_transfer(&core->i2c_adap, &msg, 1);
1459 if (err != 1) {
1460 printk("dvico_fusionhdtv_hybrid_init buf %d failed (err = %d)!\n", i, err);
1461 return;
1462 }
1463 }
1464}
1465
1466/* ----------------------------------------------------------------------- */
1376 1467
1377void cx88_card_list(struct cx88_core *core, struct pci_dev *pci) 1468void cx88_card_list(struct cx88_core *core, struct pci_dev *pci)
1378{ 1469{
@@ -1438,11 +1529,15 @@ void cx88_card_setup(struct cx88_core *core)
1438 case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T1: 1529 case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T1:
1439 case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PLUS: 1530 case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PLUS:
1440 case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL: 1531 case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL:
1532 case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_HYBRID:
1441 /* GPIO0:0 is hooked to mt352 reset pin */ 1533 /* GPIO0:0 is hooked to mt352 reset pin */
1442 cx_set(MO_GP0_IO, 0x00000101); 1534 cx_set(MO_GP0_IO, 0x00000101);
1443 cx_clear(MO_GP0_IO, 0x00000001); 1535 cx_clear(MO_GP0_IO, 0x00000001);
1444 msleep(1); 1536 msleep(1);
1445 cx_set(MO_GP0_IO, 0x00000101); 1537 cx_set(MO_GP0_IO, 0x00000101);
1538 if (0 == core->i2c_rc &&
1539 core->board == CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_HYBRID)
1540 dvico_fusionhdtv_hybrid_init(core);
1446 break; 1541 break;
1447 case CX88_BOARD_KWORLD_DVB_T: 1542 case CX88_BOARD_KWORLD_DVB_T:
1448 case CX88_BOARD_DNTV_LIVE_DVB_T: 1543 case CX88_BOARD_DNTV_LIVE_DVB_T:
@@ -1460,7 +1555,7 @@ void cx88_card_setup(struct cx88_core *core)
1460 if (0 == core->i2c_rc) { 1555 if (0 == core->i2c_rc) {
1461 /* enable tuner */ 1556 /* enable tuner */
1462 int i; 1557 int i;
1463 u8 buffer [] = { 0x10,0x12,0x13,0x04,0x16,0x00,0x14,0x04,0x017,0x00 }; 1558 static const u8 buffer [] = { 0x10,0x12,0x13,0x04,0x16,0x00,0x14,0x04,0x017,0x00 };
1464 core->i2c_client.addr = 0x0a; 1559 core->i2c_client.addr = 0x0a;
1465 1560
1466 for (i = 0; i < 5; i++) 1561 for (i = 0; i < 5; i++)
diff --git a/drivers/media/video/cx88/cx88-core.c b/drivers/media/video/cx88/cx88-core.c
index 3720f24a25cf..c2cdbafdb77b 100644
--- a/drivers/media/video/cx88/cx88-core.c
+++ b/drivers/media/video/cx88/cx88-core.c
@@ -163,7 +163,7 @@ int cx88_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc,
163 163
164 /* save pointer to jmp instruction address */ 164 /* save pointer to jmp instruction address */
165 risc->jmp = rp; 165 risc->jmp = rp;
166 BUG_ON((risc->jmp - risc->cpu + 2) / 4 > risc->size); 166 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof (*risc->cpu) > risc->size);
167 return 0; 167 return 0;
168} 168}
169 169
@@ -188,7 +188,7 @@ int cx88_risc_databuffer(struct pci_dev *pci, struct btcx_riscmem *risc,
188 188
189 /* save pointer to jmp instruction address */ 189 /* save pointer to jmp instruction address */
190 risc->jmp = rp; 190 risc->jmp = rp;
191 BUG_ON((risc->jmp - risc->cpu + 2) / 4 > risc->size); 191 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof (*risc->cpu) > risc->size);
192 return 0; 192 return 0;
193} 193}
194 194
@@ -215,8 +215,7 @@ int cx88_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
215void 215void
216cx88_free_buffer(struct pci_dev *pci, struct cx88_buffer *buf) 216cx88_free_buffer(struct pci_dev *pci, struct cx88_buffer *buf)
217{ 217{
218 if (in_interrupt()) 218 BUG_ON(in_interrupt());
219 BUG();
220 videobuf_waiton(&buf->vb,0,0); 219 videobuf_waiton(&buf->vb,0,0);
221 videobuf_dma_pci_unmap(pci, &buf->vb.dma); 220 videobuf_dma_pci_unmap(pci, &buf->vb.dma);
222 videobuf_dma_free(&buf->vb.dma); 221 videobuf_dma_free(&buf->vb.dma);
@@ -1061,7 +1060,7 @@ struct cx88_core* cx88_core_get(struct pci_dev *pci)
1061 core->pci_bus = pci->bus->number; 1060 core->pci_bus = pci->bus->number;
1062 core->pci_slot = PCI_SLOT(pci->devfn); 1061 core->pci_slot = PCI_SLOT(pci->devfn);
1063 core->pci_irqmask = 0x00fc00; 1062 core->pci_irqmask = 0x00fc00;
1064 init_MUTEX(&core->lock); 1063 mutex_init(&core->lock);
1065 1064
1066 core->nr = cx88_devcount++; 1065 core->nr = cx88_devcount++;
1067 sprintf(core->name,"cx88[%d]",core->nr); 1066 sprintf(core->name,"cx88[%d]",core->nr);
diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
index e48aa3f6e500..a9fc2695b157 100644
--- a/drivers/media/video/cx88/cx88-dvb.c
+++ b/drivers/media/video/cx88/cx88-dvb.c
@@ -40,6 +40,9 @@
40# include "cx88-vp3054-i2c.h" 40# include "cx88-vp3054-i2c.h"
41# endif 41# endif
42#endif 42#endif
43#ifdef HAVE_ZL10353
44# include "zl10353.h"
45#endif
43#ifdef HAVE_CX22702 46#ifdef HAVE_CX22702
44# include "cx22702.h" 47# include "cx22702.h"
45#endif 48#endif
@@ -111,6 +114,21 @@ static struct videobuf_queue_ops dvb_qops = {
111 114
112/* ------------------------------------------------------------------ */ 115/* ------------------------------------------------------------------ */
113 116
117#if defined(HAVE_MT352) || defined(HAVE_ZL10353)
118static int zarlink_pll_set(struct dvb_frontend *fe,
119 struct dvb_frontend_parameters *params,
120 u8 *pllbuf)
121{
122 struct cx8802_dev *dev = fe->dvb->priv;
123
124 pllbuf[0] = dev->core->pll_addr << 1;
125 dvb_pll_configure(dev->core->pll_desc, pllbuf + 1,
126 params->frequency,
127 params->u.ofdm.bandwidth);
128 return 0;
129}
130#endif
131
114#ifdef HAVE_MT352 132#ifdef HAVE_MT352
115static int dvico_fusionhdtv_demod_init(struct dvb_frontend* fe) 133static int dvico_fusionhdtv_demod_init(struct dvb_frontend* fe)
116{ 134{
@@ -176,35 +194,22 @@ static int dntv_live_dvbt_demod_init(struct dvb_frontend* fe)
176 return 0; 194 return 0;
177} 195}
178 196
179static int mt352_pll_set(struct dvb_frontend* fe,
180 struct dvb_frontend_parameters* params,
181 u8* pllbuf)
182{
183 struct cx8802_dev *dev= fe->dvb->priv;
184
185 pllbuf[0] = dev->core->pll_addr << 1;
186 dvb_pll_configure(dev->core->pll_desc, pllbuf+1,
187 params->frequency,
188 params->u.ofdm.bandwidth);
189 return 0;
190}
191
192static struct mt352_config dvico_fusionhdtv = { 197static struct mt352_config dvico_fusionhdtv = {
193 .demod_address = 0x0F, 198 .demod_address = 0x0F,
194 .demod_init = dvico_fusionhdtv_demod_init, 199 .demod_init = dvico_fusionhdtv_demod_init,
195 .pll_set = mt352_pll_set, 200 .pll_set = zarlink_pll_set,
196}; 201};
197 202
198static struct mt352_config dntv_live_dvbt_config = { 203static struct mt352_config dntv_live_dvbt_config = {
199 .demod_address = 0x0f, 204 .demod_address = 0x0f,
200 .demod_init = dntv_live_dvbt_demod_init, 205 .demod_init = dntv_live_dvbt_demod_init,
201 .pll_set = mt352_pll_set, 206 .pll_set = zarlink_pll_set,
202}; 207};
203 208
204static struct mt352_config dvico_fusionhdtv_dual = { 209static struct mt352_config dvico_fusionhdtv_dual = {
205 .demod_address = 0x0F, 210 .demod_address = 0x0F,
206 .demod_init = dvico_dual_demod_init, 211 .demod_init = dvico_dual_demod_init,
207 .pll_set = mt352_pll_set, 212 .pll_set = zarlink_pll_set,
208}; 213};
209 214
210#ifdef HAVE_VP3054_I2C 215#ifdef HAVE_VP3054_I2C
@@ -294,6 +299,46 @@ static struct mt352_config dntv_live_dvbt_pro_config = {
294#endif 299#endif
295#endif 300#endif
296 301
302#ifdef HAVE_ZL10353
303static int dvico_hybrid_tune_pll(struct dvb_frontend *fe,
304 struct dvb_frontend_parameters *params,
305 u8 *pllbuf)
306{
307 struct cx8802_dev *dev= fe->dvb->priv;
308 struct i2c_msg msg =
309 { .addr = dev->core->pll_addr, .flags = 0,
310 .buf = pllbuf + 1, .len = 4 };
311 int err;
312
313 pllbuf[0] = dev->core->pll_addr << 1;
314 dvb_pll_configure(dev->core->pll_desc, pllbuf + 1,
315 params->frequency,
316 params->u.ofdm.bandwidth);
317
318 if ((err = i2c_transfer(&dev->core->i2c_adap, &msg, 1)) != 1) {
319 printk(KERN_WARNING "cx88-dvb: %s error "
320 "(addr %02x <- %02x, err = %i)\n",
321 __FUNCTION__, pllbuf[0], pllbuf[1], err);
322 if (err < 0)
323 return err;
324 else
325 return -EREMOTEIO;
326 }
327
328 return 0;
329}
330
331static struct zl10353_config dvico_fusionhdtv_hybrid = {
332 .demod_address = 0x0F,
333 .pll_set = dvico_hybrid_tune_pll,
334};
335
336static struct zl10353_config dvico_fusionhdtv_plus_v1_1 = {
337 .demod_address = 0x0F,
338 .pll_set = zarlink_pll_set,
339};
340#endif
341
297#ifdef HAVE_CX22702 342#ifdef HAVE_CX22702
298static struct cx22702_config connexant_refboard_config = { 343static struct cx22702_config connexant_refboard_config = {
299 .demod_address = 0x43, 344 .demod_address = 0x43,
@@ -500,16 +545,27 @@ static int dvb_register(struct cx8802_dev *dev)
500 &dev->core->i2c_adap); 545 &dev->core->i2c_adap);
501 break; 546 break;
502#endif 547#endif
548#if defined(HAVE_MT352) || defined(HAVE_ZL10353)
549 case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PLUS:
550 dev->core->pll_addr = 0x60;
551 dev->core->pll_desc = &dvb_pll_thomson_dtt7579;
503#ifdef HAVE_MT352 552#ifdef HAVE_MT352
504 case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T1:
505 dev->core->pll_addr = 0x61;
506 dev->core->pll_desc = &dvb_pll_lg_z201;
507 dev->dvb.frontend = mt352_attach(&dvico_fusionhdtv, 553 dev->dvb.frontend = mt352_attach(&dvico_fusionhdtv,
508 &dev->core->i2c_adap); 554 &dev->core->i2c_adap);
555 if (dev->dvb.frontend != NULL)
556 break;
557#endif
558#ifdef HAVE_ZL10353
559 /* ZL10353 replaces MT352 on later cards */
560 dev->dvb.frontend = zl10353_attach(&dvico_fusionhdtv_plus_v1_1,
561 &dev->core->i2c_adap);
562#endif
509 break; 563 break;
510 case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PLUS: 564#endif /* HAVE_MT352 || HAVE_ZL10353 */
511 dev->core->pll_addr = 0x60; 565#ifdef HAVE_MT352
512 dev->core->pll_desc = &dvb_pll_thomson_dtt7579; 566 case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T1:
567 dev->core->pll_addr = 0x61;
568 dev->core->pll_desc = &dvb_pll_lg_z201;
513 dev->dvb.frontend = mt352_attach(&dvico_fusionhdtv, 569 dev->dvb.frontend = mt352_attach(&dvico_fusionhdtv,
514 &dev->core->i2c_adap); 570 &dev->core->i2c_adap);
515 break; 571 break;
@@ -540,6 +596,14 @@ static int dvb_register(struct cx8802_dev *dev)
540 &dev->core->i2c_adap); 596 &dev->core->i2c_adap);
541 break; 597 break;
542#endif 598#endif
599#ifdef HAVE_ZL10353
600 case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_HYBRID:
601 dev->core->pll_addr = 0x61;
602 dev->core->pll_desc = &dvb_pll_thomson_fe6600;
603 dev->dvb.frontend = zl10353_attach(&dvico_fusionhdtv_hybrid,
604 &dev->core->i2c_adap);
605 break;
606#endif
543#ifdef HAVE_OR51132 607#ifdef HAVE_OR51132
544 case CX88_BOARD_PCHDTV_HD3000: 608 case CX88_BOARD_PCHDTV_HD3000:
545 dev->dvb.frontend = or51132_attach(&pchdtv_hd3000, 609 dev->dvb.frontend = or51132_attach(&pchdtv_hd3000,
diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c
index 165d948624a3..78a63b7dd380 100644
--- a/drivers/media/video/cx88/cx88-input.c
+++ b/drivers/media/video/cx88/cx88-input.c
@@ -34,337 +34,6 @@
34 34
35/* ---------------------------------------------------------------------- */ 35/* ---------------------------------------------------------------------- */
36 36
37/* DigitalNow DNTV Live DVB-T Remote */
38static IR_KEYTAB_TYPE ir_codes_dntv_live_dvb_t[IR_KEYTAB_SIZE] = {
39 [0x00] = KEY_ESC, /* 'go up a level?' */
40 /* Keys 0 to 9 */
41 [0x0a] = KEY_KP0,
42 [0x01] = KEY_KP1,
43 [0x02] = KEY_KP2,
44 [0x03] = KEY_KP3,
45 [0x04] = KEY_KP4,
46 [0x05] = KEY_KP5,
47 [0x06] = KEY_KP6,
48 [0x07] = KEY_KP7,
49 [0x08] = KEY_KP8,
50 [0x09] = KEY_KP9,
51
52 [0x0b] = KEY_TUNER, /* tv/fm */
53 [0x0c] = KEY_SEARCH, /* scan */
54 [0x0d] = KEY_STOP,
55 [0x0e] = KEY_PAUSE,
56 [0x0f] = KEY_LIST, /* source */
57
58 [0x10] = KEY_MUTE,
59 [0x11] = KEY_REWIND, /* backward << */
60 [0x12] = KEY_POWER,
61 [0x13] = KEY_S, /* snap */
62 [0x14] = KEY_AUDIO, /* stereo */
63 [0x15] = KEY_CLEAR, /* reset */
64 [0x16] = KEY_PLAY,
65 [0x17] = KEY_ENTER,
66 [0x18] = KEY_ZOOM, /* full screen */
67 [0x19] = KEY_FASTFORWARD, /* forward >> */
68 [0x1a] = KEY_CHANNELUP,
69 [0x1b] = KEY_VOLUMEUP,
70 [0x1c] = KEY_INFO, /* preview */
71 [0x1d] = KEY_RECORD, /* record */
72 [0x1e] = KEY_CHANNELDOWN,
73 [0x1f] = KEY_VOLUMEDOWN,
74};
75
76/* ---------------------------------------------------------------------- */
77
78/* IO-DATA BCTV7E Remote */
79static IR_KEYTAB_TYPE ir_codes_iodata_bctv7e[IR_KEYTAB_SIZE] = {
80 [0x40] = KEY_TV,
81 [0x20] = KEY_RADIO, /* FM */
82 [0x60] = KEY_EPG,
83 [0x00] = KEY_POWER,
84
85 /* Keys 0 to 9 */
86 [0x44] = KEY_KP0, /* 10 */
87 [0x50] = KEY_KP1,
88 [0x30] = KEY_KP2,
89 [0x70] = KEY_KP3,
90 [0x48] = KEY_KP4,
91 [0x28] = KEY_KP5,
92 [0x68] = KEY_KP6,
93 [0x58] = KEY_KP7,
94 [0x38] = KEY_KP8,
95 [0x78] = KEY_KP9,
96
97 [0x10] = KEY_L, /* Live */
98 [0x08] = KEY_T, /* Time Shift */
99
100 [0x18] = KEY_PLAYPAUSE, /* Play */
101
102 [0x24] = KEY_ENTER, /* 11 */
103 [0x64] = KEY_ESC, /* 12 */
104 [0x04] = KEY_M, /* Multi */
105
106 [0x54] = KEY_VIDEO,
107 [0x34] = KEY_CHANNELUP,
108 [0x74] = KEY_VOLUMEUP,
109 [0x14] = KEY_MUTE,
110
111 [0x4c] = KEY_S, /* SVIDEO */
112 [0x2c] = KEY_CHANNELDOWN,
113 [0x6c] = KEY_VOLUMEDOWN,
114 [0x0c] = KEY_ZOOM,
115
116 [0x5c] = KEY_PAUSE,
117 [0x3c] = KEY_C, /* || (red) */
118 [0x7c] = KEY_RECORD, /* recording */
119 [0x1c] = KEY_STOP,
120
121 [0x41] = KEY_REWIND, /* backward << */
122 [0x21] = KEY_PLAY,
123 [0x61] = KEY_FASTFORWARD, /* forward >> */
124 [0x01] = KEY_NEXT, /* skip >| */
125};
126
127/* ---------------------------------------------------------------------- */
128
129/* ADS Tech Instant TV DVB-T PCI Remote */
130static IR_KEYTAB_TYPE ir_codes_adstech_dvb_t_pci[IR_KEYTAB_SIZE] = {
131 /* Keys 0 to 9 */
132 [0x4d] = KEY_0,
133 [0x57] = KEY_1,
134 [0x4f] = KEY_2,
135 [0x53] = KEY_3,
136 [0x56] = KEY_4,
137 [0x4e] = KEY_5,
138 [0x5e] = KEY_6,
139 [0x54] = KEY_7,
140 [0x4c] = KEY_8,
141 [0x5c] = KEY_9,
142
143 [0x5b] = KEY_POWER,
144 [0x5f] = KEY_MUTE,
145 [0x55] = KEY_GOTO,
146 [0x5d] = KEY_SEARCH,
147 [0x17] = KEY_EPG, /* Guide */
148 [0x1f] = KEY_MENU,
149 [0x0f] = KEY_UP,
150 [0x46] = KEY_DOWN,
151 [0x16] = KEY_LEFT,
152 [0x1e] = KEY_RIGHT,
153 [0x0e] = KEY_SELECT, /* Enter */
154 [0x5a] = KEY_INFO,
155 [0x52] = KEY_EXIT,
156 [0x59] = KEY_PREVIOUS,
157 [0x51] = KEY_NEXT,
158 [0x58] = KEY_REWIND,
159 [0x50] = KEY_FORWARD,
160 [0x44] = KEY_PLAYPAUSE,
161 [0x07] = KEY_STOP,
162 [0x1b] = KEY_RECORD,
163 [0x13] = KEY_TUNER, /* Live */
164 [0x0a] = KEY_A,
165 [0x12] = KEY_B,
166 [0x03] = KEY_PROG1, /* 1 */
167 [0x01] = KEY_PROG2, /* 2 */
168 [0x00] = KEY_PROG3, /* 3 */
169 [0x06] = KEY_DVD,
170 [0x48] = KEY_AUX, /* Photo */
171 [0x40] = KEY_VIDEO,
172 [0x19] = KEY_AUDIO, /* Music */
173 [0x0b] = KEY_CHANNELUP,
174 [0x08] = KEY_CHANNELDOWN,
175 [0x15] = KEY_VOLUMEUP,
176 [0x1c] = KEY_VOLUMEDOWN,
177};
178
179/* ---------------------------------------------------------------------- */
180
181/* MSI TV@nywhere remote */
182static IR_KEYTAB_TYPE ir_codes_msi_tvanywhere[IR_KEYTAB_SIZE] = {
183 /* Keys 0 to 9 */
184 [0x00] = KEY_0,
185 [0x01] = KEY_1,
186 [0x02] = KEY_2,
187 [0x03] = KEY_3,
188 [0x04] = KEY_4,
189 [0x05] = KEY_5,
190 [0x06] = KEY_6,
191 [0x07] = KEY_7,
192 [0x08] = KEY_8,
193 [0x09] = KEY_9,
194
195 [0x0c] = KEY_MUTE,
196 [0x0f] = KEY_SCREEN, /* Full Screen */
197	[0x10] = KEY_F,		/* Function */
198 [0x11] = KEY_T, /* Time shift */
199 [0x12] = KEY_POWER,
200 [0x13] = KEY_MEDIA, /* MTS */
201 [0x14] = KEY_SLOW,
202 [0x16] = KEY_REWIND, /* backward << */
203 [0x17] = KEY_ENTER, /* Return */
204 [0x18] = KEY_FASTFORWARD, /* forward >> */
205 [0x1a] = KEY_CHANNELUP,
206 [0x1b] = KEY_VOLUMEUP,
207 [0x1e] = KEY_CHANNELDOWN,
208 [0x1f] = KEY_VOLUMEDOWN,
209};
210
211/* ---------------------------------------------------------------------- */
212
213/* Cinergy 1400 DVB-T */
214static IR_KEYTAB_TYPE ir_codes_cinergy_1400[IR_KEYTAB_SIZE] = {
215 [0x01] = KEY_POWER,
216 [0x02] = KEY_1,
217 [0x03] = KEY_2,
218 [0x04] = KEY_3,
219 [0x05] = KEY_4,
220 [0x06] = KEY_5,
221 [0x07] = KEY_6,
222 [0x08] = KEY_7,
223 [0x09] = KEY_8,
224 [0x0a] = KEY_9,
225 [0x0c] = KEY_0,
226
227 [0x0b] = KEY_VIDEO,
228 [0x0d] = KEY_REFRESH,
229 [0x0e] = KEY_SELECT,
230 [0x0f] = KEY_EPG,
231 [0x10] = KEY_UP,
232 [0x11] = KEY_LEFT,
233 [0x12] = KEY_OK,
234 [0x13] = KEY_RIGHT,
235 [0x14] = KEY_DOWN,
236 [0x15] = KEY_TEXT,
237 [0x16] = KEY_INFO,
238
239 [0x17] = KEY_RED,
240 [0x18] = KEY_GREEN,
241 [0x19] = KEY_YELLOW,
242 [0x1a] = KEY_BLUE,
243
244 [0x1b] = KEY_CHANNELUP,
245 [0x1c] = KEY_VOLUMEUP,
246 [0x1d] = KEY_MUTE,
247 [0x1e] = KEY_VOLUMEDOWN,
248 [0x1f] = KEY_CHANNELDOWN,
249
250 [0x40] = KEY_PAUSE,
251 [0x4c] = KEY_PLAY,
252 [0x58] = KEY_RECORD,
253 [0x54] = KEY_PREVIOUS,
254 [0x48] = KEY_STOP,
255 [0x5c] = KEY_NEXT,
256};
257
258/* ---------------------------------------------------------------------- */
259
260/* AVERTV STUDIO 303 Remote */
261static IR_KEYTAB_TYPE ir_codes_avertv_303[IR_KEYTAB_SIZE] = {
262 [ 0x2a ] = KEY_KP1,
263 [ 0x32 ] = KEY_KP2,
264 [ 0x3a ] = KEY_KP3,
265 [ 0x4a ] = KEY_KP4,
266 [ 0x52 ] = KEY_KP5,
267 [ 0x5a ] = KEY_KP6,
268 [ 0x6a ] = KEY_KP7,
269 [ 0x72 ] = KEY_KP8,
270 [ 0x7a ] = KEY_KP9,
271 [ 0x0e ] = KEY_KP0,
272
273 [ 0x02 ] = KEY_POWER,
274 [ 0x22 ] = KEY_VIDEO,
275 [ 0x42 ] = KEY_AUDIO,
276 [ 0x62 ] = KEY_ZOOM,
277 [ 0x0a ] = KEY_TV,
278 [ 0x12 ] = KEY_CD,
279 [ 0x1a ] = KEY_TEXT,
280
281 [ 0x16 ] = KEY_SUBTITLE,
282 [ 0x1e ] = KEY_REWIND,
283 [ 0x06 ] = KEY_PRINT,
284
285 [ 0x2e ] = KEY_SEARCH,
286 [ 0x36 ] = KEY_SLEEP,
287 [ 0x3e ] = KEY_SHUFFLE,
288 [ 0x26 ] = KEY_MUTE,
289
290 [ 0x4e ] = KEY_RECORD,
291 [ 0x56 ] = KEY_PAUSE,
292 [ 0x5e ] = KEY_STOP,
293 [ 0x46 ] = KEY_PLAY,
294
295 [ 0x6e ] = KEY_RED,
296 [ 0x0b ] = KEY_GREEN,
297 [ 0x66 ] = KEY_YELLOW,
298 [ 0x03 ] = KEY_BLUE,
299
300 [ 0x76 ] = KEY_LEFT,
301 [ 0x7e ] = KEY_RIGHT,
302 [ 0x13 ] = KEY_DOWN,
303 [ 0x1b ] = KEY_UP,
304};
305
306/* ---------------------------------------------------------------------- */
307
308/* DigitalNow DNTV Live! DVB-T Pro Remote */
309static IR_KEYTAB_TYPE ir_codes_dntv_live_dvbt_pro[IR_KEYTAB_SIZE] = {
310 [ 0x16 ] = KEY_POWER,
311 [ 0x5b ] = KEY_HOME,
312
313 [ 0x55 ] = KEY_TV, /* live tv */
314 [ 0x58 ] = KEY_TUNER, /* digital Radio */
315 [ 0x5a ] = KEY_RADIO, /* FM radio */
316 [ 0x59 ] = KEY_DVD, /* dvd menu */
317 [ 0x03 ] = KEY_1,
318 [ 0x01 ] = KEY_2,
319 [ 0x06 ] = KEY_3,
320 [ 0x09 ] = KEY_4,
321 [ 0x1d ] = KEY_5,
322 [ 0x1f ] = KEY_6,
323 [ 0x0d ] = KEY_7,
324 [ 0x19 ] = KEY_8,
325 [ 0x1b ] = KEY_9,
326 [ 0x0c ] = KEY_CANCEL,
327 [ 0x15 ] = KEY_0,
328 [ 0x4a ] = KEY_CLEAR,
329 [ 0x13 ] = KEY_BACK,
330 [ 0x00 ] = KEY_TAB,
331 [ 0x4b ] = KEY_UP,
332 [ 0x4e ] = KEY_LEFT,
333 [ 0x4f ] = KEY_OK,
334 [ 0x52 ] = KEY_RIGHT,
335 [ 0x51 ] = KEY_DOWN,
336 [ 0x1e ] = KEY_VOLUMEUP,
337 [ 0x0a ] = KEY_VOLUMEDOWN,
338 [ 0x02 ] = KEY_CHANNELDOWN,
339 [ 0x05 ] = KEY_CHANNELUP,
340 [ 0x11 ] = KEY_RECORD,
341 [ 0x14 ] = KEY_PLAY,
342 [ 0x4c ] = KEY_PAUSE,
343 [ 0x1a ] = KEY_STOP,
344 [ 0x40 ] = KEY_REWIND,
345 [ 0x12 ] = KEY_FASTFORWARD,
346 [ 0x41 ] = KEY_PREVIOUSSONG, /* replay |< */
347 [ 0x42 ] = KEY_NEXTSONG, /* skip >| */
348 [ 0x54 ] = KEY_CAMERA, /* capture */
349 [ 0x50 ] = KEY_LANGUAGE, /* sap */
350 [ 0x47 ] = KEY_TV2, /* pip */
351 [ 0x4d ] = KEY_SCREEN,
352 [ 0x43 ] = KEY_SUBTITLE,
353 [ 0x10 ] = KEY_MUTE,
354 [ 0x49 ] = KEY_AUDIO, /* l/r */
355 [ 0x07 ] = KEY_SLEEP,
356 [ 0x08 ] = KEY_VIDEO, /* a/v */
357 [ 0x0e ] = KEY_PREVIOUS, /* recall */
358 [ 0x45 ] = KEY_ZOOM, /* zoom + */
359 [ 0x46 ] = KEY_ANGLE, /* zoom - */
360 [ 0x56 ] = KEY_RED,
361 [ 0x57 ] = KEY_GREEN,
362 [ 0x5c ] = KEY_YELLOW,
363 [ 0x5d ] = KEY_BLUE,
364};
365
366/* ---------------------------------------------------------------------- */
367
368struct cx88_IR { 37struct cx88_IR {
369 struct cx88_core *core; 38 struct cx88_core *core;
370 struct input_dev *input; 39 struct input_dev *input;
@@ -517,6 +186,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
517 ir->mask_keydown = 0x02; 186 ir->mask_keydown = 0x02;
518 ir->polling = 5; /* ms */ 187 ir->polling = 5; /* ms */
519 break; 188 break;
189 case CX88_BOARD_PROLINK_PLAYTVPVR:
520 case CX88_BOARD_PIXELVIEW_PLAYTV_ULTRA_PRO: 190 case CX88_BOARD_PIXELVIEW_PLAYTV_ULTRA_PRO:
521 ir_codes = ir_codes_pixelview; 191 ir_codes = ir_codes_pixelview;
522 ir->gpio_addr = MO_GP1_IO; 192 ir->gpio_addr = MO_GP1_IO;
@@ -524,6 +194,13 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
524 ir->mask_keyup = 0x80; 194 ir->mask_keyup = 0x80;
525 ir->polling = 1; /* ms */ 195 ir->polling = 1; /* ms */
526 break; 196 break;
197 case CX88_BOARD_KWORLD_LTV883:
198 ir_codes = ir_codes_pixelview;
199 ir->gpio_addr = MO_GP1_IO;
200 ir->mask_keycode = 0x1f;
201 ir->mask_keyup = 0x60;
202 ir->polling = 1; /* ms */
203 break;
527 case CX88_BOARD_ADSTECH_DVB_T_PCI: 204 case CX88_BOARD_ADSTECH_DVB_T_PCI:
528 ir_codes = ir_codes_adstech_dvb_t_pci; 205 ir_codes = ir_codes_adstech_dvb_t_pci;
529 ir->gpio_addr = MO_GP1_IO; 206 ir->gpio_addr = MO_GP1_IO;
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index 073494ceab0f..6c97aa740d27 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -227,7 +227,7 @@ static struct cx88_ctrl cx8800_ctls[] = {
227 .minimum = 0x00, 227 .minimum = 0x00,
228 .maximum = 0xff, 228 .maximum = 0xff,
229 .step = 1, 229 .step = 1,
230 .default_value = 0, 230 .default_value = 0x7f,
231 .type = V4L2_CTRL_TYPE_INTEGER, 231 .type = V4L2_CTRL_TYPE_INTEGER,
232 }, 232 },
233 .off = 128, 233 .off = 128,
@@ -255,7 +255,7 @@ static struct cx88_ctrl cx8800_ctls[] = {
255 .minimum = 0, 255 .minimum = 0,
256 .maximum = 0xff, 256 .maximum = 0xff,
257 .step = 1, 257 .step = 1,
258 .default_value = 0, 258 .default_value = 0x7f,
259 .type = V4L2_CTRL_TYPE_INTEGER, 259 .type = V4L2_CTRL_TYPE_INTEGER,
260 }, 260 },
261 .off = 128, 261 .off = 128,
@@ -300,7 +300,7 @@ static struct cx88_ctrl cx8800_ctls[] = {
300 .minimum = 0, 300 .minimum = 0,
301 .maximum = 0x3f, 301 .maximum = 0x3f,
302 .step = 1, 302 .step = 1,
303 .default_value = 0x1f, 303 .default_value = 0x3f,
304 .type = V4L2_CTRL_TYPE_INTEGER, 304 .type = V4L2_CTRL_TYPE_INTEGER,
305 }, 305 },
306 .reg = AUD_VOL_CTL, 306 .reg = AUD_VOL_CTL,
@@ -336,17 +336,17 @@ static int res_get(struct cx8800_dev *dev, struct cx8800_fh *fh, unsigned int bi
336 return 1; 336 return 1;
337 337
338 /* is it free? */ 338 /* is it free? */
339 down(&core->lock); 339 mutex_lock(&core->lock);
340 if (dev->resources & bit) { 340 if (dev->resources & bit) {
341 /* no, someone else uses it */ 341 /* no, someone else uses it */
342 up(&core->lock); 342 mutex_unlock(&core->lock);
343 return 0; 343 return 0;
344 } 344 }
345 /* it's free, grab it */ 345 /* it's free, grab it */
346 fh->resources |= bit; 346 fh->resources |= bit;
347 dev->resources |= bit; 347 dev->resources |= bit;
348 dprintk(1,"res: get %d\n",bit); 348 dprintk(1,"res: get %d\n",bit);
349 up(&core->lock); 349 mutex_unlock(&core->lock);
350 return 1; 350 return 1;
351} 351}
352 352
@@ -366,14 +366,13 @@ static
366void res_free(struct cx8800_dev *dev, struct cx8800_fh *fh, unsigned int bits) 366void res_free(struct cx8800_dev *dev, struct cx8800_fh *fh, unsigned int bits)
367{ 367{
368 struct cx88_core *core = dev->core; 368 struct cx88_core *core = dev->core;
369 if ((fh->resources & bits) != bits) 369 BUG_ON((fh->resources & bits) != bits);
370 BUG();
371 370
372 down(&core->lock); 371 mutex_lock(&core->lock);
373 fh->resources &= ~bits; 372 fh->resources &= ~bits;
374 dev->resources &= ~bits; 373 dev->resources &= ~bits;
375 dprintk(1,"res: put %d\n",bits); 374 dprintk(1,"res: put %d\n",bits);
376 up(&core->lock); 375 mutex_unlock(&core->lock);
377} 376}
378 377
379/* ------------------------------------------------------------------ */ 378/* ------------------------------------------------------------------ */
@@ -909,7 +908,8 @@ static int get_control(struct cx88_core *core, struct v4l2_control *ctl)
909 value = c->sreg ? cx_sread(c->sreg) : cx_read(c->reg); 908 value = c->sreg ? cx_sread(c->sreg) : cx_read(c->reg);
910 switch (ctl->id) { 909 switch (ctl->id) {
911 case V4L2_CID_AUDIO_BALANCE: 910 case V4L2_CID_AUDIO_BALANCE:
912 ctl->value = (value & 0x40) ? (value & 0x3f) : (0x40 - (value & 0x3f)); 911 ctl->value = ((value & 0x7f) < 0x40) ? ((value & 0x7f) + 0x40)
912 : (0x7f - (value & 0x7f));
913 break; 913 break;
914 case V4L2_CID_AUDIO_VOLUME: 914 case V4L2_CID_AUDIO_VOLUME:
915 ctl->value = 0x3f - (value & 0x3f); 915 ctl->value = 0x3f - (value & 0x3f);
@@ -918,9 +918,9 @@ static int get_control(struct cx88_core *core, struct v4l2_control *ctl)
918 ctl->value = ((value + (c->off << c->shift)) & c->mask) >> c->shift; 918 ctl->value = ((value + (c->off << c->shift)) & c->mask) >> c->shift;
919 break; 919 break;
920 } 920 }
921 printk("get_control id=0x%X reg=0x%02x val=0x%02x (mask 0x%02x)%s\n", 921 dprintk(1,"get_control id=0x%X(%s) ctrl=0x%02x, reg=0x%02x val=0x%02x (mask 0x%02x)%s\n",
922 ctl->id, c->reg, ctl->value, 922 ctl->id, c->v.name, ctl->value, c->reg,
923 c->mask, c->sreg ? " [shadowed]" : ""); 923 value,c->mask, c->sreg ? " [shadowed]" : "");
924 return 0; 924 return 0;
925} 925}
926 926
@@ -946,7 +946,7 @@ static int set_control(struct cx88_core *core, struct v4l2_control *ctl)
946 mask=c->mask; 946 mask=c->mask;
947 switch (ctl->id) { 947 switch (ctl->id) {
948 case V4L2_CID_AUDIO_BALANCE: 948 case V4L2_CID_AUDIO_BALANCE:
949 value = (ctl->value < 0x40) ? (0x40 - ctl->value) : ctl->value; 949 value = (ctl->value < 0x40) ? (0x7f - ctl->value) : (ctl->value - 0x40);
950 break; 950 break;
951 case V4L2_CID_AUDIO_VOLUME: 951 case V4L2_CID_AUDIO_VOLUME:
952 value = 0x3f - (ctl->value & 0x3f); 952 value = 0x3f - (ctl->value & 0x3f);
@@ -969,9 +969,9 @@ static int set_control(struct cx88_core *core, struct v4l2_control *ctl)
969 value = ((ctl->value - c->off) << c->shift) & c->mask; 969 value = ((ctl->value - c->off) << c->shift) & c->mask;
970 break; 970 break;
971 } 971 }
972 printk("set_control id=0x%X reg=0x%02x val=0x%02x (mask 0x%02x)%s\n", 972 dprintk(1,"set_control id=0x%X(%s) ctrl=0x%02x, reg=0x%02x val=0x%02x (mask 0x%02x)%s\n",
973 ctl->id, c->reg, value, 973 ctl->id, c->v.name, ctl->value, c->reg, value,
974 mask, c->sreg ? " [shadowed]" : ""); 974 mask, c->sreg ? " [shadowed]" : "");
975 if (c->sreg) { 975 if (c->sreg) {
976 cx_sandor(c->sreg, c->reg, mask, value); 976 cx_sandor(c->sreg, c->reg, mask, value);
977 } else { 977 } else {
@@ -987,8 +987,7 @@ static void init_controls(struct cx88_core *core)
987 987
988 for (i = 0; i < CX8800_CTLS; i++) { 988 for (i = 0; i < CX8800_CTLS; i++) {
989 ctrl.id=cx8800_ctls[i].v.id; 989 ctrl.id=cx8800_ctls[i].v.id;
990 ctrl.value=cx8800_ctls[i].v.default_value 990 ctrl.value=cx8800_ctls[i].v.default_value;
991 +cx8800_ctls[i].off;
992 set_control(core, &ctrl); 991 set_control(core, &ctrl);
993 } 992 }
994} 993}
@@ -1252,7 +1251,7 @@ int cx88_do_ioctl(struct inode *inode, struct file *file, int radio,
1252{ 1251{
1253 int err; 1252 int err;
1254 1253
1255 dprintk( 1, "CORE IOCTL: 0x%x\n", cmd ); 1254 dprintk(2, "CORE IOCTL: 0x%x\n", cmd );
1256 if (video_debug > 1) 1255 if (video_debug > 1)
1257 v4l_print_ioctl(core->name,cmd); 1256 v4l_print_ioctl(core->name,cmd);
1258 1257
@@ -1291,9 +1290,9 @@ int cx88_do_ioctl(struct inode *inode, struct file *file, int radio,
1291 if (i == ARRAY_SIZE(tvnorms)) 1290 if (i == ARRAY_SIZE(tvnorms))
1292 return -EINVAL; 1291 return -EINVAL;
1293 1292
1294 down(&core->lock); 1293 mutex_lock(&core->lock);
1295 cx88_set_tvnorm(core,&tvnorms[i]); 1294 cx88_set_tvnorm(core,&tvnorms[i]);
1296 up(&core->lock); 1295 mutex_unlock(&core->lock);
1297 return 0; 1296 return 0;
1298 } 1297 }
1299 1298
@@ -1343,10 +1342,10 @@ int cx88_do_ioctl(struct inode *inode, struct file *file, int radio,
1343 1342
1344 if (*i >= 4) 1343 if (*i >= 4)
1345 return -EINVAL; 1344 return -EINVAL;
1346 down(&core->lock); 1345 mutex_lock(&core->lock);
1347 cx88_newstation(core); 1346 cx88_newstation(core);
1348 video_mux(core,*i); 1347 video_mux(core,*i);
1349 up(&core->lock); 1348 mutex_unlock(&core->lock);
1350 return 0; 1349 return 0;
1351 } 1350 }
1352 1351
@@ -1438,7 +1437,7 @@ int cx88_do_ioctl(struct inode *inode, struct file *file, int radio,
1438 return -EINVAL; 1437 return -EINVAL;
1439 if (1 == radio && f->type != V4L2_TUNER_RADIO) 1438 if (1 == radio && f->type != V4L2_TUNER_RADIO)
1440 return -EINVAL; 1439 return -EINVAL;
1441 down(&core->lock); 1440 mutex_lock(&core->lock);
1442 core->freq = f->frequency; 1441 core->freq = f->frequency;
1443 cx88_newstation(core); 1442 cx88_newstation(core);
1444 cx88_call_i2c_clients(core,VIDIOC_S_FREQUENCY,f); 1443 cx88_call_i2c_clients(core,VIDIOC_S_FREQUENCY,f);
@@ -1447,7 +1446,7 @@ int cx88_do_ioctl(struct inode *inode, struct file *file, int radio,
1447 msleep (10); 1446 msleep (10);
1448 cx88_set_tvaudio(core); 1447 cx88_set_tvaudio(core);
1449 1448
1450 up(&core->lock); 1449 mutex_unlock(&core->lock);
1451 return 0; 1450 return 0;
1452 } 1451 }
1453 1452
@@ -1921,11 +1920,11 @@ static int __devinit cx8800_initdev(struct pci_dev *pci_dev,
1921 pci_set_drvdata(pci_dev,dev); 1920 pci_set_drvdata(pci_dev,dev);
1922 1921
1923 /* initial device configuration */ 1922 /* initial device configuration */
1924 down(&core->lock); 1923 mutex_lock(&core->lock);
1925 cx88_set_tvnorm(core,tvnorms); 1924 cx88_set_tvnorm(core,tvnorms);
1926 init_controls(core); 1925 init_controls(core);
1927 video_mux(core,0); 1926 video_mux(core,0);
1928 up(&core->lock); 1927 mutex_unlock(&core->lock);
1929 1928
1930 /* start tvaudio thread */ 1929 /* start tvaudio thread */
1931 if (core->tuner_type != TUNER_ABSENT) 1930 if (core->tuner_type != TUNER_ABSENT)
diff --git a/drivers/media/video/cx88/cx88.h b/drivers/media/video/cx88/cx88.h
index e9fd55b57fa6..cfa8668784b4 100644
--- a/drivers/media/video/cx88/cx88.h
+++ b/drivers/media/video/cx88/cx88.h
@@ -35,6 +35,7 @@
35#include "cx88-reg.h" 35#include "cx88-reg.h"
36 36
37#include <linux/version.h> 37#include <linux/version.h>
38#include <linux/mutex.h>
38#define CX88_VERSION_CODE KERNEL_VERSION(0,0,5) 39#define CX88_VERSION_CODE KERNEL_VERSION(0,0,5)
39 40
40#ifndef TRUE 41#ifndef TRUE
@@ -62,7 +63,7 @@
62/* need "shadow" registers for some write-only ones ... */ 63/* need "shadow" registers for some write-only ones ... */
63#define SHADOW_AUD_VOL_CTL 1 64#define SHADOW_AUD_VOL_CTL 1
64#define SHADOW_AUD_BAL_CTL 2 65#define SHADOW_AUD_BAL_CTL 2
65#define SHADOW_MAX 2 66#define SHADOW_MAX 3
66 67
67/* FM Radio deemphasis type */ 68/* FM Radio deemphasis type */
68enum cx88_deemph_type { 69enum cx88_deemph_type {
@@ -187,6 +188,8 @@ extern struct sram_channel cx88_sram_channels[];
187#define CX88_BOARD_DNTV_LIVE_DVB_T_PRO 42 188#define CX88_BOARD_DNTV_LIVE_DVB_T_PRO 42
188#define CX88_BOARD_KWORLD_DVB_T_CX22702 43 189#define CX88_BOARD_KWORLD_DVB_T_CX22702 43
189#define CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL 44 190#define CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL 44
191#define CX88_BOARD_KWORLD_HARDWARE_MPEG_TV_XPERT 45
192#define CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_HYBRID 46
190 193
191enum cx88_itype { 194enum cx88_itype {
192 CX88_VMUX_COMPOSITE1 = 1, 195 CX88_VMUX_COMPOSITE1 = 1,
@@ -308,8 +311,7 @@ struct cx88_core {
308 /* IR remote control state */ 311 /* IR remote control state */
309 struct cx88_IR *ir; 312 struct cx88_IR *ir;
310 313
311 struct semaphore lock; 314 struct mutex lock;
312
313 /* various v4l controls */ 315 /* various v4l controls */
314 u32 freq; 316 u32 freq;
315 317
diff --git a/drivers/media/video/dpc7146.c b/drivers/media/video/dpc7146.c
index 2831bdd12057..0fcc935828f8 100644
--- a/drivers/media/video/dpc7146.c
+++ b/drivers/media/video/dpc7146.c
@@ -1,6 +1,6 @@
1/* 1/*
2 dpc7146.c - v4l2 driver for the dpc7146 demonstration board 2 dpc7146.c - v4l2 driver for the dpc7146 demonstration board
3 3
4 Copyright (C) 2000-2003 Michael Hunold <michael@mihu.de> 4 Copyright (C) 2000-2003 Michael Hunold <michael@mihu.de>
5 5
6 This program is free software; you can redistribute it and/or modify 6 This program is free software; you can redistribute it and/or modify
@@ -52,7 +52,7 @@
52#define SAA711X_DECODED_BYTES_OF_TS_2 0x1C 52#define SAA711X_DECODED_BYTES_OF_TS_2 0x1C
53#define SAA711X_STATUS_BYTE 0x1F 53#define SAA711X_STATUS_BYTE 0x1F
54 54
55#define DPC_BOARD_CAN_DO_VBI(dev) (dev->revision != 0) 55#define DPC_BOARD_CAN_DO_VBI(dev) (dev->revision != 0)
56 56
57static int debug = 0; 57static int debug = 0;
58module_param(debug, int, 0); 58module_param(debug, int, 0);
@@ -81,16 +81,16 @@ struct dpc
81 struct video_device *video_dev; 81 struct video_device *video_dev;
82 struct video_device *vbi_dev; 82 struct video_device *vbi_dev;
83 83
84 struct i2c_adapter i2c_adapter; 84 struct i2c_adapter i2c_adapter;
85 struct i2c_client *saa7111a; 85 struct i2c_client *saa7111a;
86 86
87 int cur_input; /* current input */ 87 int cur_input; /* current input */
88}; 88};
89 89
90/* fixme: add vbi stuff here */ 90/* fixme: add vbi stuff here */
91static int dpc_probe(struct saa7146_dev* dev) 91static int dpc_probe(struct saa7146_dev* dev)
92{ 92{
93 struct dpc* dpc = NULL; 93 struct dpc* dpc = NULL;
94 struct i2c_client *client; 94 struct i2c_client *client;
95 struct list_head *item; 95 struct list_head *item;
96 96
@@ -118,20 +118,20 @@ static int dpc_probe(struct saa7146_dev* dev)
118 /* loop through all i2c-devices on the bus and look who is there */ 118 /* loop through all i2c-devices on the bus and look who is there */
119 list_for_each(item,&dpc->i2c_adapter.clients) { 119 list_for_each(item,&dpc->i2c_adapter.clients) {
120 client = list_entry(item, struct i2c_client, list); 120 client = list_entry(item, struct i2c_client, list);
121 if( I2C_SAA7111A == client->addr ) 121 if( I2C_SAA7111A == client->addr )
122 dpc->saa7111a = client; 122 dpc->saa7111a = client;
123 } 123 }
124 124
125 /* check if all devices are present */ 125 /* check if all devices are present */
126 if( 0 == dpc->saa7111a ) { 126 if( 0 == dpc->saa7111a ) {
127 DEB_D(("dpc_v4l2.o: dpc_attach failed for this device.\n")); 127 DEB_D(("dpc_v4l2.o: dpc_attach failed for this device.\n"));
128 i2c_del_adapter(&dpc->i2c_adapter); 128 i2c_del_adapter(&dpc->i2c_adapter);
129 kfree(dpc); 129 kfree(dpc);
130 return -ENODEV; 130 return -ENODEV;
131 } 131 }
132 132
133 /* all devices are present, probe was successful */ 133 /* all devices are present, probe was successful */
134 DEB_D(("dpc_v4l2.o: dpc_probe succeeded for this device.\n")); 134 DEB_D(("dpc_v4l2.o: dpc_probe succeeded for this device.\n"));
135 135
136 /* we store the pointer in our private data field */ 136 /* we store the pointer in our private data field */
137 dev->ext_priv = dpc; 137 dev->ext_priv = dpc;
@@ -182,7 +182,7 @@ static struct saa7146_ext_vv vv_data;
182static int dpc_attach(struct saa7146_dev* dev, struct saa7146_pci_extension_data *info) 182static int dpc_attach(struct saa7146_dev* dev, struct saa7146_pci_extension_data *info)
183{ 183{
184 struct dpc* dpc = (struct dpc*)dev->ext_priv; 184 struct dpc* dpc = (struct dpc*)dev->ext_priv;
185 185
186 DEB_D(("dpc_v4l2.o: dpc_attach called.\n")); 186 DEB_D(("dpc_v4l2.o: dpc_attach called.\n"));
187 187
188 /* checking for i2c-devices can be omitted here, because we 188 /* checking for i2c-devices can be omitted here, because we
@@ -193,7 +193,7 @@ static int dpc_attach(struct saa7146_dev* dev, struct saa7146_pci_extension_data
193 ERR(("cannot register capture v4l2 device. skipping.\n")); 193 ERR(("cannot register capture v4l2 device. skipping.\n"));
194 return -1; 194 return -1;
195 } 195 }
196 196
197 /* initialization stuff (vbi) (only for revision > 0 and for extensions which want it)*/ 197 /* initialization stuff (vbi) (only for revision > 0 and for extensions which want it)*/
198 if( 0 != DPC_BOARD_CAN_DO_VBI(dev)) { 198 if( 0 != DPC_BOARD_CAN_DO_VBI(dev)) {
199 if( 0 != saa7146_register_device(&dpc->vbi_dev, dev, "dpc", VFL_TYPE_VBI)) { 199 if( 0 != saa7146_register_device(&dpc->vbi_dev, dev, "dpc", VFL_TYPE_VBI)) {
@@ -205,18 +205,18 @@ static int dpc_attach(struct saa7146_dev* dev, struct saa7146_pci_extension_data
205 205
206 printk("dpc: found 'dpc7146 demonstration board'-%d.\n",dpc_num); 206 printk("dpc: found 'dpc7146 demonstration board'-%d.\n",dpc_num);
207 dpc_num++; 207 dpc_num++;
208 208
209 /* the rest */ 209 /* the rest */
210 dpc->cur_input = 0; 210 dpc->cur_input = 0;
211 dpc_init_done(dev); 211 dpc_init_done(dev);
212 212
213 return 0; 213 return 0;
214} 214}
215 215
216static int dpc_detach(struct saa7146_dev* dev) 216static int dpc_detach(struct saa7146_dev* dev)
217{ 217{
218 struct dpc* dpc = (struct dpc*)dev->ext_priv; 218 struct dpc* dpc = (struct dpc*)dev->ext_priv;
219 219
220 DEB_EE(("dev:%p\n",dev)); 220 DEB_EE(("dev:%p\n",dev));
221 221
222 i2c_release_client(dpc->saa7111a); 222 i2c_release_client(dpc->saa7111a);
@@ -238,25 +238,25 @@ static int dpc_detach(struct saa7146_dev* dev)
238int dpc_vbi_bypass(struct saa7146_dev* dev) 238int dpc_vbi_bypass(struct saa7146_dev* dev)
239{ 239{
240 struct dpc* dpc = (struct dpc*)dev->ext_priv; 240 struct dpc* dpc = (struct dpc*)dev->ext_priv;
241 241
242 int i = 1; 242 int i = 1;
243 243
244 /* switch bypass in saa7111a */ 244 /* switch bypass in saa7111a */
245 if ( 0 != dpc->saa7111a->driver->command(dpc->saa7111a,SAA711X_VBI_BYPASS, &i)) { 245 if ( 0 != dpc->saa7111a->driver->command(dpc->saa7111a,SAA711X_VBI_BYPASS, &i)) {
246 printk("dpc_v4l2.o: VBI_BYPASS: could not address saa7111a.\n"); 246 printk("dpc_v4l2.o: VBI_BYPASS: could not address saa7111a.\n");
247 return -1; 247 return -1;
248 } 248 }
249 249
250 return 0; 250 return 0;
251} 251}
252#endif 252#endif
253 253
254static int dpc_ioctl(struct saa7146_fh *fh, unsigned int cmd, void *arg) 254static int dpc_ioctl(struct saa7146_fh *fh, unsigned int cmd, void *arg)
255{ 255{
256 struct saa7146_dev *dev = fh->dev; 256 struct saa7146_dev *dev = fh->dev;
257 struct dpc* dpc = (struct dpc*)dev->ext_priv; 257 struct dpc* dpc = (struct dpc*)dev->ext_priv;
258/* 258/*
259 struct saa7146_vv *vv = dev->vv_data; 259 struct saa7146_vv *vv = dev->vv_data;
260*/ 260*/
261 switch(cmd) 261 switch(cmd)
262 { 262 {
@@ -264,11 +264,11 @@ static int dpc_ioctl(struct saa7146_fh *fh, unsigned int cmd, void *arg)
264 { 264 {
265 struct v4l2_input *i = arg; 265 struct v4l2_input *i = arg;
266 DEB_EE(("VIDIOC_ENUMINPUT %d.\n",i->index)); 266 DEB_EE(("VIDIOC_ENUMINPUT %d.\n",i->index));
267 267
268 if( i->index < 0 || i->index >= DPC_INPUTS) { 268 if( i->index < 0 || i->index >= DPC_INPUTS) {
269 return -EINVAL; 269 return -EINVAL;
270 } 270 }
271 271
272 memcpy(i, &dpc_inputs[i->index], sizeof(struct v4l2_input)); 272 memcpy(i, &dpc_inputs[i->index], sizeof(struct v4l2_input));
273 273
274 DEB_D(("dpc_v4l2.o: v4l2_ioctl: VIDIOC_ENUMINPUT %d.\n",i->index)); 274 DEB_D(("dpc_v4l2.o: v4l2_ioctl: VIDIOC_ENUMINPUT %d.\n",i->index));
@@ -289,13 +289,13 @@ static int dpc_ioctl(struct saa7146_fh *fh, unsigned int cmd, void *arg)
289 if (input < 0 || input >= DPC_INPUTS) { 289 if (input < 0 || input >= DPC_INPUTS) {
290 return -EINVAL; 290 return -EINVAL;
291 } 291 }
292 292
293 dpc->cur_input = input; 293 dpc->cur_input = input;
294 294
295 /* fixme: switch input here, switch audio, too! */ 295 /* fixme: switch input here, switch audio, too! */
296// saa7146_set_hps_source_and_sync(dev, input_port_selection[input].hps_source, input_port_selection[input].hps_sync); 296// saa7146_set_hps_source_and_sync(dev, input_port_selection[input].hps_source, input_port_selection[input].hps_sync);
297 printk("dpc_v4l2.o: VIDIOC_S_INPUT: fixme switch input.\n"); 297 printk("dpc_v4l2.o: VIDIOC_S_INPUT: fixme switch input.\n");
298 298
299 return 0; 299 return 0;
300 } 300 }
301 default: 301 default:
@@ -334,8 +334,8 @@ static struct saa7146_standard standard[] = {
334static struct saa7146_extension extension; 334static struct saa7146_extension extension;
335 335
336static struct saa7146_pci_extension_data dpc = { 336static struct saa7146_pci_extension_data dpc = {
337 .ext_priv = "Multimedia eXtension Board", 337 .ext_priv = "Multimedia eXtension Board",
338 .ext = &extension, 338 .ext = &extension,
339}; 339};
340 340
341static struct pci_device_id pci_tbl[] = { 341static struct pci_device_id pci_tbl[] = {
@@ -357,7 +357,7 @@ static struct saa7146_ext_vv vv_data = {
357 .capabilities = V4L2_CAP_VBI_CAPTURE, 357 .capabilities = V4L2_CAP_VBI_CAPTURE,
358 .stds = &standard[0], 358 .stds = &standard[0],
359 .num_stds = sizeof(standard)/sizeof(struct saa7146_standard), 359 .num_stds = sizeof(standard)/sizeof(struct saa7146_standard),
360 .std_callback = &std_callback, 360 .std_callback = &std_callback,
361 .ioctls = &ioctls[0], 361 .ioctls = &ioctls[0],
362 .ioctl = dpc_ioctl, 362 .ioctl = dpc_ioctl,
363}; 363};
@@ -365,7 +365,7 @@ static struct saa7146_ext_vv vv_data = {
365static struct saa7146_extension extension = { 365static struct saa7146_extension extension = {
366 .name = "dpc7146 demonstration board", 366 .name = "dpc7146 demonstration board",
367 .flags = SAA7146_USE_I2C_IRQ, 367 .flags = SAA7146_USE_I2C_IRQ,
368 368
369 .pci_tbl = &pci_tbl[0], 369 .pci_tbl = &pci_tbl[0],
370 .module = THIS_MODULE, 370 .module = THIS_MODULE,
371 371
@@ -375,7 +375,7 @@ static struct saa7146_extension extension = {
375 375
376 .irq_mask = 0, 376 .irq_mask = 0,
377 .irq_func = NULL, 377 .irq_func = NULL,
378}; 378};
379 379
380static int __init dpc_init_module(void) 380static int __init dpc_init_module(void)
381{ 381{
@@ -383,7 +383,7 @@ static int __init dpc_init_module(void)
383 DEB_S(("failed to register extension.\n")); 383 DEB_S(("failed to register extension.\n"));
384 return -ENODEV; 384 return -ENODEV;
385 } 385 }
386 386
387 return 0; 387 return 0;
388} 388}
389 389
diff --git a/drivers/media/video/em28xx/Kconfig b/drivers/media/video/em28xx/Kconfig
index 885fd0170086..5a793ae7cc23 100644
--- a/drivers/media/video/em28xx/Kconfig
+++ b/drivers/media/video/em28xx/Kconfig
@@ -5,6 +5,7 @@ config VIDEO_EM28XX
5 select VIDEO_TUNER 5 select VIDEO_TUNER
6 select VIDEO_TVEEPROM 6 select VIDEO_TVEEPROM
7 select VIDEO_IR 7 select VIDEO_IR
8 select VIDEO_SAA711X
8 ---help--- 9 ---help---
9 This is a video4linux driver for Empia 28xx based TV cards. 10 This is a video4linux driver for Empia 28xx based TV cards.
10 11
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 58f7b4194a0d..4e22fc4889e1 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -72,6 +72,24 @@ struct em28xx_board em28xx_boards[] = {
72 .amux = 1, 72 .amux = 1,
73 }}, 73 }},
74 }, 74 },
75 [EM2820_BOARD_KWORLD_PVRTV2800RF] = {
76 .name = "Kworld PVR TV 2800 RF",
77 .is_em2800 = 0,
78 .vchannels = 2,
79 .norm = VIDEO_MODE_PAL,
80 .tda9887_conf = TDA9887_PRESENT,
81 .has_tuner = 1,
82 .decoder = EM28XX_SAA7113,
83 .input = {{
84 .type = EM28XX_VMUX_COMPOSITE1,
85 .vmux = 0,
86 .amux = 1,
87 },{
88 .type = EM28XX_VMUX_SVIDEO,
89 .vmux = 9,
90 .amux = 1,
91 }},
92 },
75 [EM2820_BOARD_TERRATEC_CINERGY_250] = { 93 [EM2820_BOARD_TERRATEC_CINERGY_250] = {
76 .name = "Terratec Cinergy 250 USB", 94 .name = "Terratec Cinergy 250 USB",
77 .vchannels = 3, 95 .vchannels = 3,
@@ -83,7 +101,7 @@ struct em28xx_board em28xx_boards[] = {
83 .input = {{ 101 .input = {{
84 .type = EM28XX_VMUX_TELEVISION, 102 .type = EM28XX_VMUX_TELEVISION,
85 .vmux = 2, 103 .vmux = 2,
86 .amux = 0, 104 .amux = 1,
87 },{ 105 },{
88 .type = EM28XX_VMUX_COMPOSITE1, 106 .type = EM28XX_VMUX_COMPOSITE1,
89 .vmux = 0, 107 .vmux = 0,
@@ -257,27 +275,51 @@ struct usb_device_id em28xx_id_table [] = {
257 { }, 275 { },
258}; 276};
259 277
278void em28xx_pre_card_setup(struct em28xx *dev)
279{
280 /* request some modules */
281 switch(dev->model){
282 case EM2880_BOARD_TERRATEC_PRODIGY_XS:
283 case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900:
284 case EM2880_BOARD_TERRATEC_HYBRID_XS:
285 {
286 em28xx_write_regs_req(dev, 0x00, 0x08, "\x7d", 1); // reset through GPIO?
287 break;
288 }
289 }
290}
291
260void em28xx_card_setup(struct em28xx *dev) 292void em28xx_card_setup(struct em28xx *dev)
261{ 293{
262 /* request some modules */ 294 /* request some modules */
263 if (dev->model == EM2820_BOARD_HAUPPAUGE_WINTV_USB_2) { 295 switch(dev->model){
264 struct tveeprom tv; 296 case EM2820_BOARD_HAUPPAUGE_WINTV_USB_2:
297 {
298 struct tveeprom tv;
265#ifdef CONFIG_MODULES 299#ifdef CONFIG_MODULES
266 request_module("tveeprom"); 300 request_module("tveeprom");
267 request_module("ir-kbd-i2c"); 301 request_module("ir-kbd-i2c");
268 request_module("msp3400"); 302 request_module("msp3400");
269#endif 303#endif
270 /* Call first TVeeprom */ 304 /* Call first TVeeprom */
305
306 dev->i2c_client.addr = 0xa0 >> 1;
307 tveeprom_hauppauge_analog(&dev->i2c_client, &tv, dev->eedata);
271 308
272 dev->i2c_client.addr = 0xa0 >> 1; 309 dev->tuner_type= tv.tuner_type;
273 tveeprom_hauppauge_analog(&dev->i2c_client, &tv, dev->eedata); 310 if (tv.audio_processor == AUDIO_CHIP_MSP34XX) {
311 dev->i2s_speed=2048000;
312 dev->has_msp34xx=1;
313 } else
314 dev->has_msp34xx=0;
315 break;
316 }
317 case EM2820_BOARD_KWORLD_PVRTV2800RF:
318 {
319 em28xx_write_regs_req(dev,0x00,0x08, "\xf9", 1); // GPIO enables sound on KWORLD PVR TV 2800RF
320 break;
321 }
274 322
275 dev->tuner_type= tv.tuner_type;
276 if (tv.audio_processor == AUDIO_CHIP_MSP34XX) {
277 dev->i2s_speed=2048000;
278 dev->has_msp34xx=1;
279 } else
280 dev->has_msp34xx=0;
281 } 323 }
282} 324}
283 325
diff --git a/drivers/media/video/em28xx/em28xx-i2c.c b/drivers/media/video/em28xx/em28xx-i2c.c
index 6ca8631bc36d..5b6cece37aee 100644
--- a/drivers/media/video/em28xx/em28xx-i2c.c
+++ b/drivers/media/video/em28xx/em28xx-i2c.c
@@ -420,7 +420,6 @@ static int em28xx_set_tuner(int check_eeprom, struct i2c_client *client)
420 tun_setup.mode_mask = T_ANALOG_TV | T_RADIO; 420 tun_setup.mode_mask = T_ANALOG_TV | T_RADIO;
421 tun_setup.type = dev->tuner_type; 421 tun_setup.type = dev->tuner_type;
422 tun_setup.addr = dev->tuner_addr; 422 tun_setup.addr = dev->tuner_addr;
423
424 em28xx_i2c_call_clients(dev, TUNER_SET_TYPE_ADDR, &tun_setup); 423 em28xx_i2c_call_clients(dev, TUNER_SET_TYPE_ADDR, &tun_setup);
425 } 424 }
426 425
diff --git a/drivers/media/video/em28xx/em28xx-input.c b/drivers/media/video/em28xx/em28xx-input.c
index 30dfa5370c73..31e89e4f18be 100644
--- a/drivers/media/video/em28xx/em28xx-input.c
+++ b/drivers/media/video/em28xx/em28xx-input.c
@@ -43,91 +43,6 @@ MODULE_PARM_DESC(ir_debug,"enable debug messages [IR]");
43#define dprintk(fmt, arg...) if (ir_debug) \ 43#define dprintk(fmt, arg...) if (ir_debug) \
44 printk(KERN_DEBUG "%s/ir: " fmt, ir->c.name , ## arg) 44 printk(KERN_DEBUG "%s/ir: " fmt, ir->c.name , ## arg)
45 45
46/* ---------------------------------------------------------------------- */
47
48static IR_KEYTAB_TYPE ir_codes_em_terratec[IR_KEYTAB_SIZE] = {
49 [ 0x01 ] = KEY_CHANNEL,
50 [ 0x02 ] = KEY_SELECT,
51 [ 0x03 ] = KEY_MUTE,
52 [ 0x04 ] = KEY_POWER,
53 [ 0x05 ] = KEY_KP1,
54 [ 0x06 ] = KEY_KP2,
55 [ 0x07 ] = KEY_KP3,
56 [ 0x08 ] = KEY_CHANNELUP,
57 [ 0x09 ] = KEY_KP4,
58 [ 0x0a ] = KEY_KP5,
59 [ 0x0b ] = KEY_KP6,
60 [ 0x0c ] = KEY_CHANNELDOWN,
61 [ 0x0d ] = KEY_KP7,
62 [ 0x0e ] = KEY_KP8,
63 [ 0x0f ] = KEY_KP9,
64 [ 0x10 ] = KEY_VOLUMEUP,
65 [ 0x11 ] = KEY_KP0,
66 [ 0x12 ] = KEY_MENU,
67 [ 0x13 ] = KEY_PRINT,
68 [ 0x14 ] = KEY_VOLUMEDOWN,
69 [ 0x16 ] = KEY_PAUSE,
70 [ 0x18 ] = KEY_RECORD,
71 [ 0x19 ] = KEY_REWIND,
72 [ 0x1a ] = KEY_PLAY,
73 [ 0x1b ] = KEY_FORWARD,
74 [ 0x1c ] = KEY_BACKSPACE,
75 [ 0x1e ] = KEY_STOP,
76 [ 0x40 ] = KEY_ZOOM,
77};
78
79static IR_KEYTAB_TYPE ir_codes_em_pinnacle_usb[IR_KEYTAB_SIZE] = {
80 [ 0x3a ] = KEY_KP0,
81 [ 0x31 ] = KEY_KP1,
82 [ 0x32 ] = KEY_KP2,
83 [ 0x33 ] = KEY_KP3,
84 [ 0x34 ] = KEY_KP4,
85 [ 0x35 ] = KEY_KP5,
86 [ 0x36 ] = KEY_KP6,
87 [ 0x37 ] = KEY_KP7,
88 [ 0x38 ] = KEY_KP8,
89 [ 0x39 ] = KEY_KP9,
90
91 [ 0x2f ] = KEY_POWER,
92
93 [ 0x2e ] = KEY_P,
94 [ 0x1f ] = KEY_L,
95 [ 0x2b ] = KEY_I,
96
97 [ 0x2d ] = KEY_ZOOM,
98 [ 0x1e ] = KEY_ZOOM,
99 [ 0x1b ] = KEY_VOLUMEUP,
100 [ 0x0f ] = KEY_VOLUMEDOWN,
101 [ 0x17 ] = KEY_CHANNELUP,
102 [ 0x1c ] = KEY_CHANNELDOWN,
103 [ 0x25 ] = KEY_INFO,
104
105 [ 0x3c ] = KEY_MUTE,
106
107 [ 0x3d ] = KEY_LEFT,
108 [ 0x3b ] = KEY_RIGHT,
109
110 [ 0x3f ] = KEY_UP,
111 [ 0x3e ] = KEY_DOWN,
112 [ 0x1a ] = KEY_PAUSE,
113
114 [ 0x1d ] = KEY_MENU,
115 [ 0x19 ] = KEY_PLAY,
116 [ 0x16 ] = KEY_REWIND,
117 [ 0x13 ] = KEY_FORWARD,
118 [ 0x15 ] = KEY_PAUSE,
119 [ 0x0e ] = KEY_REWIND,
120 [ 0x0d ] = KEY_PLAY,
121 [ 0x0b ] = KEY_STOP,
122 [ 0x07 ] = KEY_FORWARD,
123 [ 0x27 ] = KEY_RECORD,
124 [ 0x26 ] = KEY_TUNER,
125 [ 0x29 ] = KEY_TEXT,
126 [ 0x2a ] = KEY_MEDIA,
127 [ 0x18 ] = KEY_EPG,
128 [ 0x27 ] = KEY_RECORD,
129};
130
131/* ----------------------------------------------------------------------- */ 46/* ----------------------------------------------------------------------- */
132 47
133static int get_key_terratec(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw) 48static int get_key_terratec(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index 5b267808a9d4..780342f7b239 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -28,6 +28,7 @@
28#include <linux/list.h> 28#include <linux/list.h>
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/kernel.h> 30#include <linux/kernel.h>
31#include <linux/bitmap.h>
31#include <linux/usb.h> 32#include <linux/usb.h>
32#include <linux/i2c.h> 33#include <linux/i2c.h>
33#include <linux/version.h> 34#include <linux/version.h>
@@ -59,8 +60,14 @@ MODULE_LICENSE("GPL");
59static LIST_HEAD(em28xx_devlist); 60static LIST_HEAD(em28xx_devlist);
60 61
61static unsigned int card[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET }; 62static unsigned int card[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET };
63static unsigned int video_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET };
64static unsigned int vbi_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET };
62module_param_array(card, int, NULL, 0444); 65module_param_array(card, int, NULL, 0444);
66module_param_array(video_nr, int, NULL, 0444);
67module_param_array(vbi_nr, int, NULL, 0444);
63MODULE_PARM_DESC(card,"card type"); 68MODULE_PARM_DESC(card,"card type");
69MODULE_PARM_DESC(video_nr,"video device numbers");
70MODULE_PARM_DESC(vbi_nr,"vbi device numbers");
64 71
65static int tuner = -1; 72static int tuner = -1;
66module_param(tuner, int, 0444); 73module_param(tuner, int, 0444);
@@ -70,6 +77,9 @@ static unsigned int video_debug = 0;
70module_param(video_debug,int,0644); 77module_param(video_debug,int,0644);
71MODULE_PARM_DESC(video_debug,"enable debug messages [video]"); 78MODULE_PARM_DESC(video_debug,"enable debug messages [video]");
72 79
80/* Bitmask marking allocated devices from 0 to EM28XX_MAXBOARDS */
81static unsigned long em28xx_devused;
82
73/* supported tv norms */ 83/* supported tv norms */
74static struct em28xx_tvnorm tvnorms[] = { 84static struct em28xx_tvnorm tvnorms[] = {
75 { 85 {
@@ -91,23 +101,6 @@ static struct em28xx_tvnorm tvnorms[] = {
91 } 101 }
92}; 102};
93 103
94static const unsigned char saa7114_i2c_init[] = {
95 0x00,0x00,0x01,0x08,0x02,0xc4,0x03,0x30,0x04,0x90,0x05,0x90,0x06,0xeb,0x07,0xe0,
96 0x08,0x88,0x09,0x40,0x0a,0x80,0x0b,0x44,0x0c,0x40,0x0d,0x00,0x0e,0x81,0x0f,0x2a,
97 0x10,0x06,0x11,0x00,0x12,0xc8,0x13,0x80,0x14,0x00,0x15,0x11,0x16,0x01,0x17,0x42,
98 0x18,0x40,0x19,0x80,0x40,0x00,0x41,0xff,0x42,0xff,0x43,0xff,0x44,0xff,0x45,0xff,
99 0x46,0xff,0x47,0xff,0x48,0xff,0x49,0xff,0x4a,0xff,0x4b,0xff,0x4c,0xff,0x4d,0xff,
100 0x4e,0xff,0x4f,0xff,0x50,0xff,0x51,0xff,0x52,0xff,0x53,0xff,0x54,0x5f,0x55,0xff,
101 0x56,0xff,0x57,0xff,0x58,0x00,0x59,0x47,0x5a,0x03,0x5b,0x03,0x5d,0x3e,0x5e,0x00,
102 0x80,0x1c,0x83,0x01,0x84,0xa5,0x85,0x10,0x86,0x45,0x87,0x41,0x88,0xf0,0x88,0x00,
103 0x88,0xf0,0x90,0x00,0x91,0x08,0x92,0x00,0x93,0x80,0x94,0x08,0x95,0x00,0x96,0xc0,
104 0x97,0x02,0x98,0x13,0x99,0x00,0x9a,0x38,0x9b,0x01,0x9c,0x80,0x9d,0x02,0x9e,0x06,
105 0x9f,0x01,0xa0,0x01,0xa1,0x00,0xa2,0x00,0xa4,0x80,0xa5,0x36,0xa6,0x36,0xa8,0x67,
106 0xa9,0x04,0xaa,0x00,0xac,0x33,0xad,0x02,0xae,0x00,0xb0,0xcd,0xb1,0x04,0xb2,0xcd,
107 0xb3,0x04,0xb4,0x01,0xb8,0x00,0xb9,0x00,0xba,0x00,0xbb,0x00,0xbc,0x00,0xbd,0x00,
108 0xbe,0x00,0xbf,0x00
109};
110
111#define TVNORMS ARRAY_SIZE(tvnorms) 104#define TVNORMS ARRAY_SIZE(tvnorms)
112 105
113/* supported controls */ 106/* supported controls */
@@ -134,65 +127,6 @@ static struct v4l2_queryctrl em28xx_qctrl[] = {
134 } 127 }
135}; 128};
136 129
137/* FIXME: These are specific to saa711x - should be moved to its code */
138static struct v4l2_queryctrl saa711x_qctrl[] = {
139 {
140 .id = V4L2_CID_BRIGHTNESS,
141 .type = V4L2_CTRL_TYPE_INTEGER,
142 .name = "Brightness",
143 .minimum = -128,
144 .maximum = 127,
145 .step = 1,
146 .default_value = 0,
147 .flags = 0,
148 },{
149 .id = V4L2_CID_CONTRAST,
150 .type = V4L2_CTRL_TYPE_INTEGER,
151 .name = "Contrast",
152 .minimum = 0x0,
153 .maximum = 0x1f,
154 .step = 0x1,
155 .default_value = 0x10,
156 .flags = 0,
157 },{
158 .id = V4L2_CID_SATURATION,
159 .type = V4L2_CTRL_TYPE_INTEGER,
160 .name = "Saturation",
161 .minimum = 0x0,
162 .maximum = 0x1f,
163 .step = 0x1,
164 .default_value = 0x10,
165 .flags = 0,
166 },{
167 .id = V4L2_CID_RED_BALANCE,
168 .type = V4L2_CTRL_TYPE_INTEGER,
169 .name = "Red chroma balance",
170 .minimum = -128,
171 .maximum = 127,
172 .step = 1,
173 .default_value = 0,
174 .flags = 0,
175 },{
176 .id = V4L2_CID_BLUE_BALANCE,
177 .type = V4L2_CTRL_TYPE_INTEGER,
178 .name = "Blue chroma balance",
179 .minimum = -128,
180 .maximum = 127,
181 .step = 1,
182 .default_value = 0,
183 .flags = 0,
184 },{
185 .id = V4L2_CID_GAMMA,
186 .type = V4L2_CTRL_TYPE_INTEGER,
187 .name = "Gamma",
188 .minimum = 0x0,
189 .maximum = 0x3f,
190 .step = 0x1,
191 .default_value = 0x20,
192 .flags = 0,
193 }
194};
195
196static struct usb_driver em28xx_usb_driver; 130static struct usb_driver em28xx_usb_driver;
197 131
198static DEFINE_MUTEX(em28xx_sysfs_lock); 132static DEFINE_MUTEX(em28xx_sysfs_lock);
@@ -211,6 +145,11 @@ static int em28xx_config(struct em28xx *dev)
211 em28xx_write_regs_req(dev, 0x00, 0x06, "\x40", 1); 145 em28xx_write_regs_req(dev, 0x00, 0x06, "\x40", 1);
212 146
213 /* enable vbi capturing */ 147 /* enable vbi capturing */
148
149/* em28xx_write_regs_req(dev,0x00,0x0e,"\xC0",1); audio register */
150/* em28xx_write_regs_req(dev,0x00,0x0f,"\x80",1); clk register */
151 em28xx_write_regs_req(dev,0x00,0x11,"\x51",1);
152
214 em28xx_audio_usb_mute(dev, 1); 153 em28xx_audio_usb_mute(dev, 1);
215 dev->mute = 1; /* maybe not the right place... */ 154 dev->mute = 1; /* maybe not the right place... */
216 dev->volume = 0x1f; 155 dev->volume = 0x1f;
@@ -230,22 +169,9 @@ static int em28xx_config(struct em28xx *dev)
230static void em28xx_config_i2c(struct em28xx *dev) 169static void em28xx_config_i2c(struct em28xx *dev)
231{ 170{
232 struct v4l2_frequency f; 171 struct v4l2_frequency f;
233 struct video_decoder_init em28xx_vdi = {.data = NULL }; 172 em28xx_i2c_call_clients(dev, VIDIOC_INT_RESET, NULL);
234 173 em28xx_i2c_call_clients(dev, VIDIOC_S_INPUT, &dev->ctl_input);
235 174 em28xx_i2c_call_clients(dev, VIDIOC_STREAMON, NULL);
236 /* configure decoder */
237 if(dev->model == EM2820_BOARD_MSI_VOX_USB_2){
238 em28xx_vdi.data=saa7114_i2c_init;
239 em28xx_vdi.len=sizeof(saa7114_i2c_init);
240 }
241
242
243 em28xx_i2c_call_clients(dev, DECODER_INIT, &em28xx_vdi);
244 em28xx_i2c_call_clients(dev, DECODER_SET_INPUT, &dev->ctl_input);
245/* em28xx_i2c_call_clients(dev,DECODER_SET_PICTURE, &dev->vpic); */
246/* em28xx_i2c_call_clients(dev,DECODER_SET_NORM,&dev->tvnorm->id); */
247/* em28xx_i2c_call_clients(dev,DECODER_ENABLE_OUTPUT,&output); */
248/* em28xx_i2c_call_clients(dev,DECODER_DUMP, NULL); */
249 175
250 /* configure tuner */ 176 /* configure tuner */
251 f.tuner = 0; 177 f.tuner = 0;
@@ -285,8 +211,7 @@ static void video_mux(struct em28xx *dev, int index)
285 dev->ctl_input = index; 211 dev->ctl_input = index;
286 dev->ctl_ainput = INPUT(index)->amux; 212 dev->ctl_ainput = INPUT(index)->amux;
287 213
288 em28xx_i2c_call_clients(dev, DECODER_SET_INPUT, &input); 214 em28xx_i2c_call_clients(dev, VIDIOC_S_INPUT, &input);
289
290 215
291 em28xx_videodbg("Setting input index=%d, vmux=%d, amux=%d\n",index,input,dev->ctl_ainput); 216 em28xx_videodbg("Setting input index=%d, vmux=%d, amux=%d\n",index,input,dev->ctl_ainput);
292 217
@@ -298,11 +223,11 @@ static void video_mux(struct em28xx *dev, int index)
298 em28xx_audio_source(dev, ainput); 223 em28xx_audio_source(dev, ainput);
299 } else { 224 } else {
300 switch (dev->ctl_ainput) { 225 switch (dev->ctl_ainput) {
301 case 0: 226 case 0:
302 ainput = EM28XX_AUDIO_SRC_TUNER; 227 ainput = EM28XX_AUDIO_SRC_TUNER;
303 break; 228 break;
304 default: 229 default:
305 ainput = EM28XX_AUDIO_SRC_LINE; 230 ainput = EM28XX_AUDIO_SRC_LINE;
306 } 231 }
307 em28xx_audio_source(dev, ainput); 232 em28xx_audio_source(dev, ainput);
308 } 233 }
@@ -323,13 +248,20 @@ static int em28xx_v4l2_open(struct inode *inode, struct file *filp)
323 h = list_entry(list, struct em28xx, devlist); 248 h = list_entry(list, struct em28xx, devlist);
324 if (h->vdev->minor == minor) { 249 if (h->vdev->minor == minor) {
325 dev = h; 250 dev = h;
251 dev->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
252 }
253 if (h->vbi_dev->minor == minor) {
254 dev = h;
255 dev->type = V4L2_BUF_TYPE_VBI_CAPTURE;
326 } 256 }
327 } 257 }
258 if (NULL == dev)
259 return -ENODEV;
328 260
329 filp->private_data=dev; 261 filp->private_data=dev;
330 262
331 263 em28xx_videodbg("open minor=%d type=%s users=%d\n",
332 em28xx_videodbg("users=%d\n", dev->users); 264 minor,v4l2_type_names[dev->type],dev->users);
333 265
334 if (!down_read_trylock(&em28xx_disconnect)) 266 if (!down_read_trylock(&em28xx_disconnect))
335 return -ERESTARTSYS; 267 return -ERESTARTSYS;
@@ -340,40 +272,36 @@ static int em28xx_v4l2_open(struct inode *inode, struct file *filp)
340 return -EBUSY; 272 return -EBUSY;
341 } 273 }
342 274
343/* if(dev->vbi_dev->minor == minor){ 275 mutex_init(&dev->fileop_lock); /* to 1 == available */
344 dev->type=V4L2_BUF_TYPE_VBI_CAPTURE;
345 }*/
346 if (dev->vdev->minor == minor) {
347 dev->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
348 }
349
350 init_MUTEX(&dev->fileop_lock); /* to 1 == available */
351 spin_lock_init(&dev->queue_lock); 276 spin_lock_init(&dev->queue_lock);
352 init_waitqueue_head(&dev->wait_frame); 277 init_waitqueue_head(&dev->wait_frame);
353 init_waitqueue_head(&dev->wait_stream); 278 init_waitqueue_head(&dev->wait_stream);
354 279
355 down(&dev->lock); 280 mutex_lock(&dev->lock);
356 281
357 em28xx_set_alternate(dev); 282 if (dev->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
283 em28xx_set_alternate(dev);
358 284
359 dev->width = norm_maxw(dev); 285 dev->width = norm_maxw(dev);
360 dev->height = norm_maxh(dev); 286 dev->height = norm_maxh(dev);
361 dev->frame_size = dev->width * dev->height * 2; 287 dev->frame_size = dev->width * dev->height * 2;
362 dev->field_size = dev->frame_size >> 1; /*both_fileds ? dev->frame_size>>1 : dev->frame_size; */ 288 dev->field_size = dev->frame_size >> 1; /*both_fileds ? dev->frame_size>>1 : dev->frame_size; */
363 dev->bytesperline = dev->width * 2; 289 dev->bytesperline = dev->width * 2;
364 dev->hscale = 0; 290 dev->hscale = 0;
365 dev->vscale = 0; 291 dev->vscale = 0;
366 292
367 em28xx_capture_start(dev, 1); 293 em28xx_capture_start(dev, 1);
368 em28xx_resolution_set(dev); 294 em28xx_resolution_set(dev);
369 295
370 /* device needs to be initialized before isoc transfer */ 296 /* device needs to be initialized before isoc transfer */
371 video_mux(dev, 0); 297 video_mux(dev, 0);
372 298
373 /* start the transfer */ 299 /* start the transfer */
374 errCode = em28xx_init_isoc(dev); 300 errCode = em28xx_init_isoc(dev);
375 if (errCode) 301 if (errCode)
376 goto err; 302 goto err;
303
304 }
377 305
378 dev->users++; 306 dev->users++;
379 filp->private_data = dev; 307 filp->private_data = dev;
@@ -386,10 +314,8 @@ static int em28xx_v4l2_open(struct inode *inode, struct file *filp)
386 314
387 dev->state |= DEV_INITIALIZED; 315 dev->state |= DEV_INITIALIZED;
388 316
389 video_mux(dev, 0); 317err:
390 318 mutex_unlock(&dev->lock);
391 err:
392 up(&dev->lock);
393 up_read(&em28xx_disconnect); 319 up_read(&em28xx_disconnect);
394 return errCode; 320 return errCode;
395} 321}
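
The locking changes in this hunk and the ones that follow are a mechanical semaphore-to-mutex conversion. A minimal standalone sketch of the mapping, using a hypothetical struct foo rather than the driver's em28xx state:

#include <linux/mutex.h>

struct foo {
	struct mutex lock;			/* was: struct semaphore lock; */
};

static void foo_setup(struct foo *f)
{
	mutex_init(&f->lock);			/* was: init_MUTEX(&f->lock); */
}

static int foo_do_work(struct foo *f)
{
	if (mutex_lock_interruptible(&f->lock))	/* was: down_interruptible() */
		return -ERESTARTSYS;
	/* ... touch shared state ... */
	mutex_unlock(&f->lock);			/* was: up() */
	return 0;
}
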
@@ -403,14 +329,21 @@ static void em28xx_release_resources(struct em28xx *dev)
403{ 329{
404 mutex_lock(&em28xx_sysfs_lock); 330 mutex_lock(&em28xx_sysfs_lock);
405 331
406 em28xx_info("V4L2 device /dev/video%d deregistered\n", 332 /*FIXME: I2C IR should be disconnected */
407 dev->vdev->minor); 333
334 em28xx_info("V4L2 devices /dev/video%d and /dev/vbi%d deregistered\n",
335 dev->vdev->minor-MINOR_VFL_TYPE_GRABBER_MIN,
336 dev->vbi_dev->minor-MINOR_VFL_TYPE_VBI_MIN);
408 list_del(&dev->devlist); 337 list_del(&dev->devlist);
409 video_unregister_device(dev->vdev); 338 video_unregister_device(dev->vdev);
410/* video_unregister_device(dev->vbi_dev); */ 339 video_unregister_device(dev->vbi_dev);
411 em28xx_i2c_unregister(dev); 340 em28xx_i2c_unregister(dev);
412 usb_put_dev(dev->udev); 341 usb_put_dev(dev->udev);
413 mutex_unlock(&em28xx_sysfs_lock); 342 mutex_unlock(&em28xx_sysfs_lock);
343
344
345 /* Mark device as unused */
346 em28xx_devused&=~(1<<dev->devno);
414} 347}
415 348
416/* 349/*
@@ -424,7 +357,7 @@ static int em28xx_v4l2_close(struct inode *inode, struct file *filp)
424 357
425 em28xx_videodbg("users=%d\n", dev->users); 358 em28xx_videodbg("users=%d\n", dev->users);
426 359
427 down(&dev->lock); 360 mutex_lock(&dev->lock);
428 361
429 em28xx_uninit_isoc(dev); 362 em28xx_uninit_isoc(dev);
430 363
@@ -433,7 +366,7 @@ static int em28xx_v4l2_close(struct inode *inode, struct file *filp)
433 /* the device is already disconnected, free the remaining resources */ 366 /* the device is already disconnected, free the remaining resources */
434 if (dev->state & DEV_DISCONNECTED) { 367 if (dev->state & DEV_DISCONNECTED) {
435 em28xx_release_resources(dev); 368 em28xx_release_resources(dev);
436 up(&dev->lock); 369 mutex_unlock(&dev->lock);
437 kfree(dev); 370 kfree(dev);
438 return 0; 371 return 0;
439 } 372 }
@@ -449,7 +382,7 @@ static int em28xx_v4l2_close(struct inode *inode, struct file *filp)
449 382
450 dev->users--; 383 dev->users--;
451 wake_up_interruptible_nr(&dev->open, 1); 384 wake_up_interruptible_nr(&dev->open, 1);
452 up(&dev->lock); 385 mutex_unlock(&dev->lock);
453 return 0; 386 return 0;
454} 387}
455 388
@@ -466,32 +399,54 @@ em28xx_v4l2_read(struct file *filp, char __user * buf, size_t count,
466 int ret = 0; 399 int ret = 0;
467 struct em28xx *dev = filp->private_data; 400 struct em28xx *dev = filp->private_data;
468 401
469 if (down_interruptible(&dev->fileop_lock)) 402 if (dev->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
403 em28xx_videodbg("V4L2_BUF_TYPE_VIDEO_CAPTURE is set\n");
404 }
405 if (dev->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
406 em28xx_videodbg("V4L2_BUF_TYPE_VBI_CAPTURE is set\n");
407 em28xx_videodbg("not supported yet! ...\n");
408 if (copy_to_user(buf, "", 1)) {
409 mutex_unlock(&dev->fileop_lock);
410 return -EFAULT;
411 }
412 return (1);
413 }
414 if (dev->type == V4L2_BUF_TYPE_SLICED_VBI_CAPTURE) {
415 em28xx_videodbg("V4L2_BUF_TYPE_SLICED_VBI_CAPTURE is set\n");
416 em28xx_videodbg("not supported yet! ...\n");
417 if (copy_to_user(buf, "", 1)) {
418 mutex_unlock(&dev->fileop_lock);
419 return -EFAULT;
420 }
421 return (1);
422 }
423
424 if (mutex_lock_interruptible(&dev->fileop_lock))
470 return -ERESTARTSYS; 425 return -ERESTARTSYS;
471 426
472 if (dev->state & DEV_DISCONNECTED) { 427 if (dev->state & DEV_DISCONNECTED) {
473 em28xx_videodbg("device not present\n"); 428 em28xx_videodbg("device not present\n");
474 up(&dev->fileop_lock); 429 mutex_unlock(&dev->fileop_lock);
475 return -ENODEV; 430 return -ENODEV;
476 } 431 }
477 432
478 if (dev->state & DEV_MISCONFIGURED) { 433 if (dev->state & DEV_MISCONFIGURED) {
479 em28xx_videodbg("device misconfigured; close and open it again\n"); 434 em28xx_videodbg("device misconfigured; close and open it again\n");
480 up(&dev->fileop_lock); 435 mutex_unlock(&dev->fileop_lock);
481 return -EIO; 436 return -EIO;
482 } 437 }
483 438
484 if (dev->io == IO_MMAP) { 439 if (dev->io == IO_MMAP) {
485 em28xx_videodbg ("IO method is set to mmap; close and open" 440 em28xx_videodbg ("IO method is set to mmap; close and open"
486 " the device again to choose the read method\n"); 441 " the device again to choose the read method\n");
487 up(&dev->fileop_lock); 442 mutex_unlock(&dev->fileop_lock);
488 return -EINVAL; 443 return -EINVAL;
489 } 444 }
490 445
491 if (dev->io == IO_NONE) { 446 if (dev->io == IO_NONE) {
492 if (!em28xx_request_buffers(dev, EM28XX_NUM_READ_FRAMES)) { 447 if (!em28xx_request_buffers(dev, EM28XX_NUM_READ_FRAMES)) {
493 em28xx_errdev("read failed, not enough memory\n"); 448 em28xx_errdev("read failed, not enough memory\n");
494 up(&dev->fileop_lock); 449 mutex_unlock(&dev->fileop_lock);
495 return -ENOMEM; 450 return -ENOMEM;
496 } 451 }
497 dev->io = IO_READ; 452 dev->io = IO_READ;
@@ -500,13 +455,13 @@ em28xx_v4l2_read(struct file *filp, char __user * buf, size_t count,
500 } 455 }
501 456
502 if (!count) { 457 if (!count) {
503 up(&dev->fileop_lock); 458 mutex_unlock(&dev->fileop_lock);
504 return 0; 459 return 0;
505 } 460 }
506 461
507 if (list_empty(&dev->outqueue)) { 462 if (list_empty(&dev->outqueue)) {
508 if (filp->f_flags & O_NONBLOCK) { 463 if (filp->f_flags & O_NONBLOCK) {
509 up(&dev->fileop_lock); 464 mutex_unlock(&dev->fileop_lock);
510 return -EAGAIN; 465 return -EAGAIN;
511 } 466 }
512 ret = wait_event_interruptible 467 ret = wait_event_interruptible
@@ -514,11 +469,11 @@ em28xx_v4l2_read(struct file *filp, char __user * buf, size_t count,
514 (!list_empty(&dev->outqueue)) || 469 (!list_empty(&dev->outqueue)) ||
515 (dev->state & DEV_DISCONNECTED)); 470 (dev->state & DEV_DISCONNECTED));
516 if (ret) { 471 if (ret) {
517 up(&dev->fileop_lock); 472 mutex_unlock(&dev->fileop_lock);
518 return ret; 473 return ret;
519 } 474 }
520 if (dev->state & DEV_DISCONNECTED) { 475 if (dev->state & DEV_DISCONNECTED) {
521 up(&dev->fileop_lock); 476 mutex_unlock(&dev->fileop_lock);
522 return -ENODEV; 477 return -ENODEV;
523 } 478 }
524 } 479 }
@@ -537,12 +492,12 @@ em28xx_v4l2_read(struct file *filp, char __user * buf, size_t count,
537 count = f->buf.length; 492 count = f->buf.length;
538 493
539 if (copy_to_user(buf, f->bufmem, count)) { 494 if (copy_to_user(buf, f->bufmem, count)) {
540 up(&dev->fileop_lock); 495 mutex_unlock(&dev->fileop_lock);
541 return -EFAULT; 496 return -EFAULT;
542 } 497 }
543 *f_pos += count; 498 *f_pos += count;
544 499
545 up(&dev->fileop_lock); 500 mutex_unlock(&dev->fileop_lock);
546 501
547 return count; 502 return count;
548} 503}
@@ -556,7 +511,7 @@ static unsigned int em28xx_v4l2_poll(struct file *filp, poll_table * wait)
556 unsigned int mask = 0; 511 unsigned int mask = 0;
557 struct em28xx *dev = filp->private_data; 512 struct em28xx *dev = filp->private_data;
558 513
559 if (down_interruptible(&dev->fileop_lock)) 514 if (mutex_lock_interruptible(&dev->fileop_lock))
560 return POLLERR; 515 return POLLERR;
561 516
562 if (dev->state & DEV_DISCONNECTED) { 517 if (dev->state & DEV_DISCONNECTED) {
@@ -582,13 +537,13 @@ static unsigned int em28xx_v4l2_poll(struct file *filp, poll_table * wait)
582 if (!list_empty(&dev->outqueue)) 537 if (!list_empty(&dev->outqueue))
583 mask |= POLLIN | POLLRDNORM; 538 mask |= POLLIN | POLLRDNORM;
584 539
585 up(&dev->fileop_lock); 540 mutex_unlock(&dev->fileop_lock);
586 541
587 return mask; 542 return mask;
588 } 543 }
589 } 544 }
590 545
591 up(&dev->fileop_lock); 546 mutex_unlock(&dev->fileop_lock);
592 return POLLERR; 547 return POLLERR;
593} 548}
594 549
@@ -628,25 +583,25 @@ static int em28xx_v4l2_mmap(struct file *filp, struct vm_area_struct *vma)
628 583
629 struct em28xx *dev = filp->private_data; 584 struct em28xx *dev = filp->private_data;
630 585
631 if (down_interruptible(&dev->fileop_lock)) 586 if (mutex_lock_interruptible(&dev->fileop_lock))
632 return -ERESTARTSYS; 587 return -ERESTARTSYS;
633 588
634 if (dev->state & DEV_DISCONNECTED) { 589 if (dev->state & DEV_DISCONNECTED) {
635 em28xx_videodbg("mmap: device not present\n"); 590 em28xx_videodbg("mmap: device not present\n");
636 up(&dev->fileop_lock); 591 mutex_unlock(&dev->fileop_lock);
637 return -ENODEV; 592 return -ENODEV;
638 } 593 }
639 594
640 if (dev->state & DEV_MISCONFIGURED) { 595 if (dev->state & DEV_MISCONFIGURED) {
641 em28xx_videodbg ("mmap: Device is misconfigured; close and " 596 em28xx_videodbg ("mmap: Device is misconfigured; close and "
642 "open it again\n"); 597 "open it again\n");
643 up(&dev->fileop_lock); 598 mutex_unlock(&dev->fileop_lock);
644 return -EIO; 599 return -EIO;
645 } 600 }
646 601
647 if (dev->io != IO_MMAP || !(vma->vm_flags & VM_WRITE) || 602 if (dev->io != IO_MMAP || !(vma->vm_flags & VM_WRITE) ||
648 size != PAGE_ALIGN(dev->frame[0].buf.length)) { 603 size != PAGE_ALIGN(dev->frame[0].buf.length)) {
649 up(&dev->fileop_lock); 604 mutex_unlock(&dev->fileop_lock);
650 return -EINVAL; 605 return -EINVAL;
651 } 606 }
652 607
@@ -656,7 +611,7 @@ static int em28xx_v4l2_mmap(struct file *filp, struct vm_area_struct *vma)
656 } 611 }
657 if (i == dev->num_frames) { 612 if (i == dev->num_frames) {
658 em28xx_videodbg("mmap: user supplied mapping address is out of range\n"); 613 em28xx_videodbg("mmap: user supplied mapping address is out of range\n");
659 up(&dev->fileop_lock); 614 mutex_unlock(&dev->fileop_lock);
660 return -EINVAL; 615 return -EINVAL;
661 } 616 }
662 617
@@ -668,7 +623,7 @@ static int em28xx_v4l2_mmap(struct file *filp, struct vm_area_struct *vma)
668 while (size > 0) { /* size is page-aligned */ 623 while (size > 0) { /* size is page-aligned */
669 if (vm_insert_page(vma, start, vmalloc_to_page(pos))) { 624 if (vm_insert_page(vma, start, vmalloc_to_page(pos))) {
670 em28xx_videodbg("mmap: vm_insert_page failed\n"); 625 em28xx_videodbg("mmap: vm_insert_page failed\n");
671 up(&dev->fileop_lock); 626 mutex_unlock(&dev->fileop_lock);
672 return -EAGAIN; 627 return -EAGAIN;
673 } 628 }
674 start += PAGE_SIZE; 629 start += PAGE_SIZE;
@@ -680,7 +635,7 @@ static int em28xx_v4l2_mmap(struct file *filp, struct vm_area_struct *vma)
680 vma->vm_private_data = &dev->frame[i]; 635 vma->vm_private_data = &dev->frame[i];
681 636
682 em28xx_vm_open(vma); 637 em28xx_vm_open(vma);
683 up(&dev->fileop_lock); 638 mutex_unlock(&dev->fileop_lock);
684 return 0; 639 return 0;
685} 640}
686 641
@@ -702,43 +657,6 @@ static int em28xx_get_ctrl(struct em28xx *dev, struct v4l2_control *ctrl)
702 } 657 }
703} 658}
704 659
705/*FIXME: should be moved to saa711x */
706static int saa711x_get_ctrl(struct em28xx *dev, struct v4l2_control *ctrl)
707{
708 s32 tmp;
709 switch (ctrl->id) {
710 case V4L2_CID_BRIGHTNESS:
711 if ((tmp = em28xx_brightness_get(dev)) < 0)
712 return -EIO;
713 ctrl->value = (s32) ((s8) tmp); /* FIXME: cleaner way to extend sign? */
714 return 0;
715 case V4L2_CID_CONTRAST:
716 if ((ctrl->value = em28xx_contrast_get(dev)) < 0)
717 return -EIO;
718 return 0;
719 case V4L2_CID_SATURATION:
720 if ((ctrl->value = em28xx_saturation_get(dev)) < 0)
721 return -EIO;
722 return 0;
723 case V4L2_CID_RED_BALANCE:
724 if ((tmp = em28xx_v_balance_get(dev)) < 0)
725 return -EIO;
726 ctrl->value = (s32) ((s8) tmp); /* FIXME: cleaner way to extend sign? */
727 return 0;
728 case V4L2_CID_BLUE_BALANCE:
729 if ((tmp = em28xx_u_balance_get(dev)) < 0)
730 return -EIO;
731 ctrl->value = (s32) ((s8) tmp); /* FIXME: cleaner way to extend sign? */
732 return 0;
733 case V4L2_CID_GAMMA:
734 if ((ctrl->value = em28xx_gamma_get(dev)) < 0)
735 return -EIO;
736 return 0;
737 default:
738 return -EINVAL;
739 }
740}
741
742/* 660/*
743 * em28xx_set_ctrl() 661 * em28xx_set_ctrl()
744 * mute or set new saturation, brightness or contrast 662 * mute or set new saturation, brightness or contrast
@@ -761,27 +679,6 @@ static int em28xx_set_ctrl(struct em28xx *dev, const struct v4l2_control *ctrl)
761 } 679 }
762} 680}
763 681
764/*FIXME: should be moved to saa711x */
765static int saa711x_set_ctrl(struct em28xx *dev, const struct v4l2_control *ctrl)
766{
767 switch (ctrl->id) {
768 case V4L2_CID_BRIGHTNESS:
769 return em28xx_brightness_set(dev, ctrl->value);
770 case V4L2_CID_CONTRAST:
771 return em28xx_contrast_set(dev, ctrl->value);
772 case V4L2_CID_SATURATION:
773 return em28xx_saturation_set(dev, ctrl->value);
774 case V4L2_CID_RED_BALANCE:
775 return em28xx_v_balance_set(dev, ctrl->value);
776 case V4L2_CID_BLUE_BALANCE:
777 return em28xx_u_balance_set(dev, ctrl->value);
778 case V4L2_CID_GAMMA:
779 return em28xx_gamma_set(dev, ctrl->value);
780 default:
781 return -EINVAL;
782 }
783}
784
785/* 682/*
786 * em28xx_stream_interrupt() 683 * em28xx_stream_interrupt()
787 * stops streaming 684 * stops streaming
@@ -802,7 +699,8 @@ static int em28xx_stream_interrupt(struct em28xx *dev)
802 else if (ret) { 699 else if (ret) {
803 dev->state |= DEV_MISCONFIGURED; 700 dev->state |= DEV_MISCONFIGURED;
804 em28xx_videodbg("device is misconfigured; close and " 701 em28xx_videodbg("device is misconfigured; close and "
805 "open /dev/video%d again\n", dev->vdev->minor); 702 "open /dev/video%d again\n",
703 dev->vdev->minor-MINOR_VFL_TYPE_GRABBER_MIN);
806 return ret; 704 return ret;
807 } 705 }
808 706
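
The subtraction added to this message (and to the register/deregister messages later in the patch) turns a raw V4L minor number into the node number the user actually sees. Assuming the conventional minor bases from the videodev.h of this kernel generation, the computation is simply:

/* Assumed minor bases (videodev.h of this era). */
#define MINOR_VFL_TYPE_GRABBER_MIN 0	/* /dev/video0..63 */
#define MINOR_VFL_TYPE_VBI_MIN     224	/* /dev/vbi0..31   */

/* e.g. minor 3 -> "/dev/video3", minor 224 -> "/dev/vbi0" */
int video_node = dev->vdev->minor    - MINOR_VFL_TYPE_GRABBER_MIN;
int vbi_node   = dev->vbi_dev->minor - MINOR_VFL_TYPE_VBI_MIN;
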
@@ -853,6 +751,181 @@ static int em28xx_set_norm(struct em28xx *dev, int width, int height)
853 return 0; 751 return 0;
854} 752}
855 753
754static int em28xx_get_fmt(struct em28xx *dev, struct v4l2_format *format)
755{
756 em28xx_videodbg("VIDIOC_G_FMT: type=%s\n",
757 (format->type ==V4L2_BUF_TYPE_VIDEO_CAPTURE) ?
758 "V4L2_BUF_TYPE_VIDEO_CAPTURE" :
759 (format->type ==V4L2_BUF_TYPE_VBI_CAPTURE) ?
760 "V4L2_BUF_TYPE_VBI_CAPTURE" :
761 (format->type == V4L2_BUF_TYPE_SLICED_VBI_CAPTURE) ?
762 "V4L2_BUF_TYPE_SLICED_VBI_CAPTURE " :
763 "not supported");
764
765 switch (format->type) {
766 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
767 {
768 format->fmt.pix.width = dev->width;
769 format->fmt.pix.height = dev->height;
770 format->fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
771 format->fmt.pix.bytesperline = dev->bytesperline;
772 format->fmt.pix.sizeimage = dev->frame_size;
773 format->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
774 format->fmt.pix.field = dev->interlaced ? V4L2_FIELD_INTERLACED : V4L2_FIELD_TOP; /* FIXME: TOP? NONE? BOTTOM? ALTERNATE? */
775
776 em28xx_videodbg("VIDIOC_G_FMT: %dx%d\n", dev->width,
777 dev->height);
778 break;
779 }
780
781 case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
782 {
783 format->fmt.sliced.service_set=0;
784
785 em28xx_i2c_call_clients(dev,VIDIOC_G_FMT,format);
786
787 if (format->fmt.sliced.service_set==0)
788 return -EINVAL;
789
790 break;
791 }
792
793 default:
794 return -EINVAL;
795 }
796 return (0);
797}
798
799static int em28xx_set_fmt(struct em28xx *dev, unsigned int cmd, struct v4l2_format *format)
800{
801 u32 i;
802 int ret = 0;
803 int width = format->fmt.pix.width;
804 int height = format->fmt.pix.height;
805 unsigned int hscale, vscale;
806 unsigned int maxh, maxw;
807
808 maxw = norm_maxw(dev);
809 maxh = norm_maxh(dev);
810
811 em28xx_videodbg("%s: type=%s\n",
812 cmd == VIDIOC_TRY_FMT ?
813 "VIDIOC_TRY_FMT" : "VIDIOC_S_FMT",
814 format->type == V4L2_BUF_TYPE_VIDEO_CAPTURE ?
815 "V4L2_BUF_TYPE_VIDEO_CAPTURE" :
816 format->type == V4L2_BUF_TYPE_VBI_CAPTURE ?
817 "V4L2_BUF_TYPE_VBI_CAPTURE " :
818 "not supported");
819
820 if (format->type == V4L2_BUF_TYPE_SLICED_VBI_CAPTURE) {
821 em28xx_i2c_call_clients(dev,VIDIOC_G_FMT,format);
822
823 if (format->fmt.sliced.service_set==0)
824 return -EINVAL;
825
826 return 0;
827 }
828
829
830 if (format->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
831 return -EINVAL;
832
833 em28xx_videodbg("%s: requested %dx%d\n",
834 cmd == VIDIOC_TRY_FMT ?
835 "VIDIOC_TRY_FMT" : "VIDIOC_S_FMT",
836 format->fmt.pix.width, format->fmt.pix.height);
837
838 /* FIXME: Move some code away from here */
839 /* width must be even because of the YUYV format */
840 /* height must be even because of interlacing */
841 height &= 0xfffe;
842 width &= 0xfffe;
843
844 if (height < 32)
845 height = 32;
846 if (height > maxh)
847 height = maxh;
848 if (width < 48)
849 width = 48;
850 if (width > maxw)
851 width = maxw;
852
853 if(dev->is_em2800){
854 /* the em2800 can only scale down to 50% */
855 if(height % (maxh / 2))
856 height=maxh;
857 if(width % (maxw / 2))
858 width=maxw;
859 /* according to empiatech support */
860 /* the MaxPacketSize is too small to support */
861 /* framesizes larger than 640x480 @ 30 fps */
862 /* or 640x576 @ 25 fps. As this would cut */
863 /* off a part of the image we prefer */
864 /* 360x576 or 360x480 for now */
865 if(width == maxw && height == maxh)
866 width /= 2;
867 }
868
869 if ((hscale = (((unsigned long)maxw) << 12) / width - 4096L) >= 0x4000)
870 hscale = 0x3fff;
871
872 width = (((unsigned long)maxw) << 12) / (hscale + 4096L);
873
874 if ((vscale = (((unsigned long)maxh) << 12) / height - 4096L) >= 0x4000)
875 vscale = 0x3fff;
876
877 height = (((unsigned long)maxh) << 12) / (vscale + 4096L);
878
879 format->fmt.pix.width = width;
880 format->fmt.pix.height = height;
881 format->fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
882 format->fmt.pix.bytesperline = width * 2;
883 format->fmt.pix.sizeimage = width * 2 * height;
884 format->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
885 format->fmt.pix.field = V4L2_FIELD_INTERLACED;
886
887 em28xx_videodbg("%s: returned %dx%d (%d, %d)\n",
888 cmd == VIDIOC_TRY_FMT ?
889 "VIDIOC_TRY_FMT" :"VIDIOC_S_FMT",
890 format->fmt.pix.width, format->fmt.pix.height, hscale, vscale);
891
892 if (cmd == VIDIOC_TRY_FMT)
893 return 0;
894
895 for (i = 0; i < dev->num_frames; i++)
896 if (dev->frame[i].vma_use_count) {
897 em28xx_videodbg("VIDIOC_S_FMT failed. "
898 "Unmap the buffers first.\n");
899 return -EINVAL;
900 }
901
902 /* stop io in case it is already in progress */
903 if (dev->stream == STREAM_ON) {
904 em28xx_videodbg("VIDIOC_SET_FMT: interrupting stream\n");
905 if ((ret = em28xx_stream_interrupt(dev)))
906 return ret;
907 }
908
909 em28xx_release_buffers(dev);
910 dev->io = IO_NONE;
911
912 /* set new image size */
913 dev->width = width;
914 dev->height = height;
915 dev->frame_size = dev->width * dev->height * 2;
916 dev->field_size = dev->frame_size >> 1;
917 dev->bytesperline = dev->width * 2;
918 dev->hscale = hscale;
919 dev->vscale = vscale;
920 em28xx_uninit_isoc(dev);
921 em28xx_set_alternate(dev);
922 em28xx_capture_start(dev, 1);
923 em28xx_resolution_set(dev);
924 em28xx_init_isoc(dev);
925
926 return 0;
927}
928
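
The fixed-point scaler math in em28xx_set_fmt() above can be checked in isolation. The sketch below repeats the same formula in plain userspace C with illustrative numbers (720x576 PAL maximum, 400x300 request); it is not part of the patch:

#include <stdio.h>

/* scale = (max << 12) / req - 4096, clamped to 0x3fff; the driver clamps
 * the request to the norm's maximum first, so req <= max is assumed here. */
static unsigned int scale_for(unsigned int max, unsigned int *req)
{
	unsigned long s = (((unsigned long)max) << 12) / *req - 4096UL;

	if (s >= 0x4000)
		s = 0x3fff;
	*req = (((unsigned long)max) << 12) / (s + 4096UL);	/* achievable size */
	return s;
}

int main(void)
{
	unsigned int w = 400, h = 300;		/* requested size (illustrative) */
	unsigned int hscale = scale_for(720, &w);
	unsigned int vscale = scale_for(576, &h);

	/* prints: 400x300 -> hscale=3276 vscale=3768 */
	printf("%ux%u -> hscale=%u vscale=%u\n", w, h, hscale, vscale);
	return 0;
}
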
856/* 929/*
857 * em28xx_v4l2_do_ioctl() 930 * em28xx_v4l2_do_ioctl()
858 * This function is _not_ called directly, but from 931 * This function is _not_ called directly, but from
@@ -868,392 +941,325 @@ static int em28xx_do_ioctl(struct inode *inode, struct file *filp,
868 switch (cmd) { 941 switch (cmd) {
869 /* ---------- tv norms ---------- */ 942 /* ---------- tv norms ---------- */
870 case VIDIOC_ENUMSTD: 943 case VIDIOC_ENUMSTD:
871 { 944 {
872 struct v4l2_standard *e = arg; 945 struct v4l2_standard *e = arg;
873 unsigned int i; 946 unsigned int i;
874 947
875 i = e->index; 948 i = e->index;
876 if (i >= TVNORMS) 949 if (i >= TVNORMS)
877 return -EINVAL; 950 return -EINVAL;
878 ret = v4l2_video_std_construct(e, tvnorms[e->index].id, 951 ret = v4l2_video_std_construct(e, tvnorms[e->index].id,
879 tvnorms[e->index].name); 952 tvnorms[e->index].name);
880 e->index = i; 953 e->index = i;
881 if (ret < 0) 954 if (ret < 0)
882 return ret; 955 return ret;
883 return 0; 956 return 0;
884 } 957 }
885 case VIDIOC_G_STD: 958 case VIDIOC_G_STD:
886 { 959 {
887 v4l2_std_id *id = arg; 960 v4l2_std_id *id = arg;
888 961
889 *id = dev->tvnorm->id; 962 *id = dev->tvnorm->id;
890 return 0; 963 return 0;
891 } 964 }
892 case VIDIOC_S_STD: 965 case VIDIOC_S_STD:
893 { 966 {
894 v4l2_std_id *id = arg; 967 v4l2_std_id *id = arg;
895 unsigned int i; 968 unsigned int i;
896 969
970 for (i = 0; i < TVNORMS; i++)
971 if (*id == tvnorms[i].id)
972 break;
973 if (i == TVNORMS)
897 for (i = 0; i < TVNORMS; i++) 974 for (i = 0; i < TVNORMS; i++)
898 if (*id == tvnorms[i].id) 975 if (*id & tvnorms[i].id)
899 break; 976 break;
900 if (i == TVNORMS) 977 if (i == TVNORMS)
901 for (i = 0; i < TVNORMS; i++) 978 return -EINVAL;
902 if (*id & tvnorms[i].id)
903 break;
904 if (i == TVNORMS)
905 return -EINVAL;
906
907 down(&dev->lock);
908 dev->tvnorm = &tvnorms[i];
909 979
910 em28xx_set_norm(dev, dev->width, dev->height); 980 mutex_lock(&dev->lock);
981 dev->tvnorm = &tvnorms[i];
911 982
912/* 983 em28xx_set_norm(dev, dev->width, dev->height);
913 dev->width=norm_maxw(dev);
914 dev->height=norm_maxh(dev);
915 dev->frame_size=dev->width*dev->height*2;
916 dev->field_size=dev->frame_size>>1;
917 dev->bytesperline=dev->width*2;
918 dev->hscale=0;
919 dev->vscale=0;
920 984
921 em28xx_resolution_set(dev); 985 em28xx_i2c_call_clients(dev, VIDIOC_S_STD,
922*/ 986 &dev->tvnorm->id);
923/*
924 em28xx_uninit_isoc(dev);
925 em28xx_set_alternate(dev);
926 em28xx_capture_start(dev, 1);
927 em28xx_resolution_set(dev);
928 em28xx_init_isoc(dev);
929*/
930 em28xx_i2c_call_clients(dev, DECODER_SET_NORM,
931 &tvnorms[i].mode);
932 em28xx_i2c_call_clients(dev, VIDIOC_S_STD,
933 &dev->tvnorm->id);
934 987
935 up(&dev->lock); 988 mutex_unlock(&dev->lock);
936 989
937 return 0; 990 return 0;
938 } 991 }
939 992
940 /* ------ input switching ---------- */ 993 /* ------ input switching ---------- */
941 case VIDIOC_ENUMINPUT: 994 case VIDIOC_ENUMINPUT:
942 { 995 {
943 struct v4l2_input *i = arg; 996 struct v4l2_input *i = arg;
944 unsigned int n; 997 unsigned int n;
945 static const char *iname[] = { 998 static const char *iname[] = {
946 [EM28XX_VMUX_COMPOSITE1] = "Composite1", 999 [EM28XX_VMUX_COMPOSITE1] = "Composite1",
947 [EM28XX_VMUX_COMPOSITE2] = "Composite2", 1000 [EM28XX_VMUX_COMPOSITE2] = "Composite2",
948 [EM28XX_VMUX_COMPOSITE3] = "Composite3", 1001 [EM28XX_VMUX_COMPOSITE3] = "Composite3",
949 [EM28XX_VMUX_COMPOSITE4] = "Composite4", 1002 [EM28XX_VMUX_COMPOSITE4] = "Composite4",
950 [EM28XX_VMUX_SVIDEO] = "S-Video", 1003 [EM28XX_VMUX_SVIDEO] = "S-Video",
951 [EM28XX_VMUX_TELEVISION] = "Television", 1004 [EM28XX_VMUX_TELEVISION] = "Television",
952 [EM28XX_VMUX_CABLE] = "Cable TV", 1005 [EM28XX_VMUX_CABLE] = "Cable TV",
953 [EM28XX_VMUX_DVB] = "DVB", 1006 [EM28XX_VMUX_DVB] = "DVB",
954 [EM28XX_VMUX_DEBUG] = "for debug only", 1007 [EM28XX_VMUX_DEBUG] = "for debug only",
955 }; 1008 };
956 1009
957 n = i->index; 1010 n = i->index;
958 if (n >= MAX_EM28XX_INPUT) 1011 if (n >= MAX_EM28XX_INPUT)
959 return -EINVAL; 1012 return -EINVAL;
960 if (0 == INPUT(n)->type) 1013 if (0 == INPUT(n)->type)
961 return -EINVAL; 1014 return -EINVAL;
962 memset(i, 0, sizeof(*i)); 1015 memset(i, 0, sizeof(*i));
963 i->index = n; 1016 i->index = n;
964 i->type = V4L2_INPUT_TYPE_CAMERA; 1017 i->type = V4L2_INPUT_TYPE_CAMERA;
965 strcpy(i->name, iname[INPUT(n)->type]); 1018 strcpy(i->name, iname[INPUT(n)->type]);
966 if ((EM28XX_VMUX_TELEVISION == INPUT(n)->type) || 1019 if ((EM28XX_VMUX_TELEVISION == INPUT(n)->type) ||
967 (EM28XX_VMUX_CABLE == INPUT(n)->type)) 1020 (EM28XX_VMUX_CABLE == INPUT(n)->type))
968 i->type = V4L2_INPUT_TYPE_TUNER; 1021 i->type = V4L2_INPUT_TYPE_TUNER;
969 for (n = 0; n < ARRAY_SIZE(tvnorms); n++) 1022 for (n = 0; n < ARRAY_SIZE(tvnorms); n++)
970 i->std |= tvnorms[n].id; 1023 i->std |= tvnorms[n].id;
971 return 0; 1024 return 0;
972 } 1025 }
973
974 case VIDIOC_G_INPUT: 1026 case VIDIOC_G_INPUT:
975 { 1027 {
976 int *i = arg; 1028 int *i = arg;
977 *i = dev->ctl_input; 1029 *i = dev->ctl_input;
978
979 return 0;
980 }
981 1030
1031 return 0;
1032 }
982 case VIDIOC_S_INPUT: 1033 case VIDIOC_S_INPUT:
983 { 1034 {
984 int *index = arg; 1035 int *index = arg;
985
986 if (*index >= MAX_EM28XX_INPUT)
987 return -EINVAL;
988 if (0 == INPUT(*index)->type)
989 return -EINVAL;
990 1036
991 down(&dev->lock); 1037 if (*index >= MAX_EM28XX_INPUT)
992 video_mux(dev, *index); 1038 return -EINVAL;
993 up(&dev->lock); 1039 if (0 == INPUT(*index)->type)
1040 return -EINVAL;
994 1041
995 return 0; 1042 mutex_lock(&dev->lock);
996 } 1043 video_mux(dev, *index);
1044 mutex_unlock(&dev->lock);
997 1045
1046 return 0;
1047 }
998 case VIDIOC_G_AUDIO: 1048 case VIDIOC_G_AUDIO:
999 { 1049 {
1000 struct v4l2_audio *a = arg; 1050 struct v4l2_audio *a = arg;
1001 unsigned int index = a->index; 1051 unsigned int index = a->index;
1002 1052
1003 if (a->index > 1) 1053 if (a->index > 1)
1004 return -EINVAL; 1054 return -EINVAL;
1005 memset(a, 0, sizeof(*a)); 1055 memset(a, 0, sizeof(*a));
1006 index = dev->ctl_ainput; 1056 index = dev->ctl_ainput;
1007 1057
1008 if (index == 0) { 1058 if (index == 0) {
1009 strcpy(a->name, "Television"); 1059 strcpy(a->name, "Television");
1010 } else { 1060 } else {
1011 strcpy(a->name, "Line In"); 1061 strcpy(a->name, "Line In");
1012 }
1013 a->capability = V4L2_AUDCAP_STEREO;
1014 a->index = index;
1015 return 0;
1016 } 1062 }
1017 1063 a->capability = V4L2_AUDCAP_STEREO;
1064 a->index = index;
1065 return 0;
1066 }
1018 case VIDIOC_S_AUDIO: 1067 case VIDIOC_S_AUDIO:
1019 { 1068 {
1020 struct v4l2_audio *a = arg; 1069 struct v4l2_audio *a = arg;
1021 if (a->index != dev->ctl_ainput)
1022 return -EINVAL;
1023 1070
1024 return 0; 1071 if (a->index != dev->ctl_ainput)
1025 } 1072 return -EINVAL;
1026 1073
1027 /* --- controls ---------------------------------------------- */ 1074 return 0;
1075 }
1076
1077 /* --- controls ---------------------------------------------- */
1028 case VIDIOC_QUERYCTRL: 1078 case VIDIOC_QUERYCTRL:
1029 { 1079 {
1030 struct v4l2_queryctrl *qc = arg; 1080 struct v4l2_queryctrl *qc = arg;
1031 int i, id=qc->id; 1081 int i, id=qc->id;
1032
1033 memset(qc,0,sizeof(*qc));
1034 qc->id=id;
1035
1036 if (!dev->has_msp34xx) {
1037 for (i = 0; i < ARRAY_SIZE(em28xx_qctrl); i++) {
1038 if (qc->id && qc->id == em28xx_qctrl[i].id) {
1039 memcpy(qc, &(em28xx_qctrl[i]),
1040 sizeof(*qc));
1041 return 0;
1042 }
1043 }
1044 }
1045 if (dev->decoder == EM28XX_TVP5150) {
1046 em28xx_i2c_call_clients(dev,cmd,qc);
1047 if (qc->type)
1048 return 0;
1049 else
1050 return -EINVAL;
1051 }
1052 for (i = 0; i < ARRAY_SIZE(saa711x_qctrl); i++) {
1053 if (qc->id && qc->id == saa711x_qctrl[i].id) {
1054 memcpy(qc, &(saa711x_qctrl[i]),
1055 sizeof(*qc));
1056 return 0;
1057 }
1058 }
1059 1082
1060 return -EINVAL; 1083 memset(qc,0,sizeof(*qc));
1061 } 1084 qc->id=id;
1062 1085
1063 case VIDIOC_G_CTRL: 1086 if (!dev->has_msp34xx) {
1064 { 1087 for (i = 0; i < ARRAY_SIZE(em28xx_qctrl); i++) {
1065 struct v4l2_control *ctrl = arg; 1088 if (qc->id && qc->id == em28xx_qctrl[i].id) {
1066 int retval=-EINVAL; 1089 memcpy(qc, &(em28xx_qctrl[i]),
1067 1090 sizeof(*qc));
1068 if (!dev->has_msp34xx)
1069 retval=em28xx_get_ctrl(dev, ctrl);
1070 if (retval==-EINVAL) {
1071 if (dev->decoder == EM28XX_TVP5150) {
1072 em28xx_i2c_call_clients(dev,cmd,arg);
1073 return 0; 1091 return 0;
1074 } 1092 }
1075 1093 }
1076 return saa711x_get_ctrl(dev, ctrl);
1077 } else return retval;
1078 } 1094 }
1095 em28xx_i2c_call_clients(dev,cmd,qc);
1096 if (qc->type)
1097 return 0;
1098 else
1099 return -EINVAL;
1100 }
1101 case VIDIOC_G_CTRL:
1102 {
1103 struct v4l2_control *ctrl = arg;
1104 int retval=-EINVAL;
1079 1105
1106 if (!dev->has_msp34xx)
1107 retval=em28xx_get_ctrl(dev, ctrl);
1108 if (retval==-EINVAL) {
1109 em28xx_i2c_call_clients(dev,cmd,arg);
1110 return 0;
1111 } else return retval;
1112 }
1080 case VIDIOC_S_CTRL: 1113 case VIDIOC_S_CTRL:
1081 { 1114 {
1082 struct v4l2_control *ctrl = arg; 1115 struct v4l2_control *ctrl = arg;
1083 u8 i; 1116 u8 i;
1084 1117
1085 if (!dev->has_msp34xx){ 1118 if (!dev->has_msp34xx){
1086 for (i = 0; i < ARRAY_SIZE(em28xx_qctrl); i++) { 1119 for (i = 0; i < ARRAY_SIZE(em28xx_qctrl); i++) {
1087 if (ctrl->id == em28xx_qctrl[i].id) { 1120 if (ctrl->id == em28xx_qctrl[i].id) {
1088 if (ctrl->value < 1121 if (ctrl->value <
1089 em28xx_qctrl[i].minimum 1122 em28xx_qctrl[i].minimum
1090 || ctrl->value > 1123 || ctrl->value >
1091 em28xx_qctrl[i].maximum) 1124 em28xx_qctrl[i].maximum)
1092 return -ERANGE; 1125 return -ERANGE;
1093 return em28xx_set_ctrl(dev, ctrl); 1126 return em28xx_set_ctrl(dev, ctrl);
1094 }
1095 }
1096 }
1097
1098 if (dev->decoder == EM28XX_TVP5150) {
1099 em28xx_i2c_call_clients(dev,cmd,arg);
1100 return 0;
1101 } else if (!dev->has_msp34xx) {
1102 for (i = 0; i < ARRAY_SIZE(em28xx_qctrl); i++) {
1103 if (ctrl->id == em28xx_qctrl[i].id) {
1104 if (ctrl->value <
1105 em28xx_qctrl[i].minimum
1106 || ctrl->value >
1107 em28xx_qctrl[i].maximum)
1108 return -ERANGE;
1109 return em28xx_set_ctrl(dev, ctrl);
1110 }
1111 }
1112 for (i = 0; i < ARRAY_SIZE(saa711x_qctrl); i++) {
1113 if (ctrl->id == saa711x_qctrl[i].id) {
1114 if (ctrl->value <
1115 saa711x_qctrl[i].minimum
1116 || ctrl->value >
1117 saa711x_qctrl[i].maximum)
1118 return -ERANGE;
1119 return saa711x_set_ctrl(dev, ctrl);
1120 }
1121 } 1127 }
1122 } 1128 }
1123
1124 return -EINVAL;
1125 } 1129 }
1126 1130
1127 /* --- tuner ioctls ------------------------------------------ */ 1131 em28xx_i2c_call_clients(dev,cmd,arg);
1132 return 0;
1133 }
1134 /* --- tuner ioctls ------------------------------------------ */
1128 case VIDIOC_G_TUNER: 1135 case VIDIOC_G_TUNER:
1129 { 1136 {
1130 struct v4l2_tuner *t = arg; 1137 struct v4l2_tuner *t = arg;
1131 int status = 0; 1138 int status = 0;
1132 1139
1133 if (0 != t->index) 1140 if (0 != t->index)
1134 return -EINVAL; 1141 return -EINVAL;
1135 1142
1136 memset(t, 0, sizeof(*t)); 1143 memset(t, 0, sizeof(*t));
1137 strcpy(t->name, "Tuner"); 1144 strcpy(t->name, "Tuner");
1138 t->type = V4L2_TUNER_ANALOG_TV; 1145 t->type = V4L2_TUNER_ANALOG_TV;
1139 t->capability = V4L2_TUNER_CAP_NORM; 1146 t->capability = V4L2_TUNER_CAP_NORM;
1140 t->rangehigh = 0xffffffffUL; /* FIXME: set correct range */ 1147 t->rangehigh = 0xffffffffUL; /* FIXME: set correct range */
1141/* t->signal = 0xffff;*/ 1148/* t->signal = 0xffff;*/
1142/* em28xx_i2c_call_clients(dev,VIDIOC_G_TUNER,t);*/ 1149/* em28xx_i2c_call_clients(dev,VIDIOC_G_TUNER,t);*/
1143 /* No way to get signal strength? */ 1150 /* No way to get signal strength? */
1144 down(&dev->lock); 1151 mutex_lock(&dev->lock);
1145 em28xx_i2c_call_clients(dev, DECODER_GET_STATUS, 1152 em28xx_i2c_call_clients(dev, DECODER_GET_STATUS,
1146 &status); 1153 &status);
1147 up(&dev->lock); 1154 mutex_unlock(&dev->lock);
1148 t->signal = 1155 t->signal =
1149 (status & DECODER_STATUS_GOOD) != 0 ? 0xffff : 0; 1156 (status & DECODER_STATUS_GOOD) != 0 ? 0xffff : 0;
1150 1157
1151 em28xx_videodbg("VIDIOC_G_TUNER: signal=%x, afc=%x\n", t->signal, 1158 em28xx_videodbg("VIDIOC_G_TUNER: signal=%x, afc=%x\n", t->signal,
1152 t->afc); 1159 t->afc);
1153 return 0; 1160 return 0;
1154 } 1161 }
1155 case VIDIOC_S_TUNER: 1162 case VIDIOC_S_TUNER:
1156 { 1163 {
1157 struct v4l2_tuner *t = arg; 1164 struct v4l2_tuner *t = arg;
1158 int status = 0; 1165 int status = 0;
1159 1166
1160 if (0 != t->index) 1167 if (0 != t->index)
1161 return -EINVAL; 1168 return -EINVAL;
1162 memset(t, 0, sizeof(*t)); 1169 memset(t, 0, sizeof(*t));
1163 strcpy(t->name, "Tuner"); 1170 strcpy(t->name, "Tuner");
1164 t->type = V4L2_TUNER_ANALOG_TV; 1171 t->type = V4L2_TUNER_ANALOG_TV;
1165 t->capability = V4L2_TUNER_CAP_NORM; 1172 t->capability = V4L2_TUNER_CAP_NORM;
1166 t->rangehigh = 0xffffffffUL; /* FIXME: set correct range */ 1173 t->rangehigh = 0xffffffffUL; /* FIXME: set correct range */
1167/* t->signal = 0xffff; */ 1174/* t->signal = 0xffff; */
1168 /* No way to get signal strength? */ 1175 /* No way to get signal strength? */
1169 down(&dev->lock); 1176 mutex_lock(&dev->lock);
1170 em28xx_i2c_call_clients(dev, DECODER_GET_STATUS, 1177 em28xx_i2c_call_clients(dev, DECODER_GET_STATUS,
1171 &status); 1178 &status);
1172 up(&dev->lock); 1179 mutex_unlock(&dev->lock);
1173 t->signal = 1180 t->signal =
1174 (status & DECODER_STATUS_GOOD) != 0 ? 0xffff : 0; 1181 (status & DECODER_STATUS_GOOD) != 0 ? 0xffff : 0;
1175 1182
1176 em28xx_videodbg("VIDIOC_S_TUNER: signal=%x, afc=%x\n", 1183 em28xx_videodbg("VIDIOC_S_TUNER: signal=%x, afc=%x\n",
1177 t->signal, t->afc); 1184 t->signal, t->afc);
1178 return 0; 1185 return 0;
1179 } 1186 }
1180 case VIDIOC_G_FREQUENCY: 1187 case VIDIOC_G_FREQUENCY:
1181 { 1188 {
1182 struct v4l2_frequency *f = arg; 1189 struct v4l2_frequency *f = arg;
1183 1190
1184 memset(f, 0, sizeof(*f)); 1191 memset(f, 0, sizeof(*f));
1185 f->type = V4L2_TUNER_ANALOG_TV; 1192 f->type = V4L2_TUNER_ANALOG_TV;
1186 f->frequency = dev->ctl_freq; 1193 f->frequency = dev->ctl_freq;
1187 1194
1188 return 0; 1195 return 0;
1189 } 1196 }
1190 case VIDIOC_S_FREQUENCY: 1197 case VIDIOC_S_FREQUENCY:
1191 { 1198 {
1192 struct v4l2_frequency *f = arg; 1199 struct v4l2_frequency *f = arg;
1193
1194 if (0 != f->tuner)
1195 return -EINVAL;
1196 1200
1197 if (V4L2_TUNER_ANALOG_TV != f->type) 1201 if (0 != f->tuner)
1198 return -EINVAL; 1202 return -EINVAL;
1199 1203
1200 down(&dev->lock); 1204 if (V4L2_TUNER_ANALOG_TV != f->type)
1201 dev->ctl_freq = f->frequency; 1205 return -EINVAL;
1202 em28xx_i2c_call_clients(dev, VIDIOC_S_FREQUENCY, f);
1203 up(&dev->lock);
1204 return 0;
1205 }
1206 1206
1207 mutex_lock(&dev->lock);
1208 dev->ctl_freq = f->frequency;
1209 em28xx_i2c_call_clients(dev, VIDIOC_S_FREQUENCY, f);
1210 mutex_unlock(&dev->lock);
1211 return 0;
1212 }
1207 case VIDIOC_CROPCAP: 1213 case VIDIOC_CROPCAP:
1208 { 1214 {
1209 struct v4l2_cropcap *cc = arg; 1215 struct v4l2_cropcap *cc = arg;
1210 1216
1211 if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) 1217 if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1212 return -EINVAL; 1218 return -EINVAL;
1213 cc->bounds.left = 0; 1219 cc->bounds.left = 0;
1214 cc->bounds.top = 0; 1220 cc->bounds.top = 0;
1215 cc->bounds.width = dev->width; 1221 cc->bounds.width = dev->width;
1216 cc->bounds.height = dev->height; 1222 cc->bounds.height = dev->height;
1217 cc->defrect = cc->bounds; 1223 cc->defrect = cc->bounds;
1218 cc->pixelaspect.numerator = 54; /* 4:3 FIXME: remove magic numbers */ 1224 cc->pixelaspect.numerator = 54; /* 4:3 FIXME: remove magic numbers */
1219 cc->pixelaspect.denominator = 59; 1225 cc->pixelaspect.denominator = 59;
1220 return 0; 1226 return 0;
1221 } 1227 }
1222 case VIDIOC_STREAMON: 1228 case VIDIOC_STREAMON:
1223 { 1229 {
1224 int *type = arg; 1230 int *type = arg;
1225 1231
1226 if (*type != V4L2_BUF_TYPE_VIDEO_CAPTURE 1232 if (*type != V4L2_BUF_TYPE_VIDEO_CAPTURE
1227 || dev->io != IO_MMAP) 1233 || dev->io != IO_MMAP)
1228 return -EINVAL; 1234 return -EINVAL;
1229 1235
1230 if (list_empty(&dev->inqueue)) 1236 if (list_empty(&dev->inqueue))
1231 return -EINVAL; 1237 return -EINVAL;
1232 1238
1233 dev->stream = STREAM_ON; /* FIXME: Start video capture here? */ 1239 dev->stream = STREAM_ON; /* FIXME: Start video capture here? */
1234 1240
1235 em28xx_videodbg("VIDIOC_STREAMON: starting stream\n"); 1241 em28xx_videodbg("VIDIOC_STREAMON: starting stream\n");
1236 1242
1237 return 0; 1243 return 0;
1238 } 1244 }
1239 case VIDIOC_STREAMOFF: 1245 case VIDIOC_STREAMOFF:
1240 { 1246 {
1241 int *type = arg; 1247 int *type = arg;
1242 int ret; 1248 int ret;
1243
1244 if (*type != V4L2_BUF_TYPE_VIDEO_CAPTURE
1245 || dev->io != IO_MMAP)
1246 return -EINVAL;
1247 1249
1248 if (dev->stream == STREAM_ON) { 1250 if (*type != V4L2_BUF_TYPE_VIDEO_CAPTURE
1249 em28xx_videodbg ("VIDIOC_STREAMOFF: interrupting stream\n"); 1251 || dev->io != IO_MMAP)
1250 if ((ret = em28xx_stream_interrupt(dev))) 1252 return -EINVAL;
1251 return ret;
1252 }
1253 em28xx_empty_framequeues(dev);
1254 1253
1255 return 0; 1254 if (dev->stream == STREAM_ON) {
1255 em28xx_videodbg ("VIDIOC_STREAMOFF: interrupting stream\n");
1256 if ((ret = em28xx_stream_interrupt(dev)))
1257 return ret;
1256 } 1258 }
1259 em28xx_empty_framequeues(dev);
1260
1261 return 0;
1262 }
1257 default: 1263 default:
1258 return v4l_compat_translate_ioctl(inode, filp, cmd, arg, 1264 return v4l_compat_translate_ioctl(inode, filp, cmd, arg,
1259 driver_ioctl); 1265 driver_ioctl);
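
The reworked VIDIOC_S_STD case above resolves the requested standard in two passes, first by exact id and then by any overlapping bit. Factored out, the lookup amounts to the helper sketched below (assuming the driver's tvnorms[]/TVNORMS table; the helper name is hypothetical, not patch code):

static int em28xx_find_norm(v4l2_std_id id)
{
	unsigned int i;

	for (i = 0; i < TVNORMS; i++)	/* pass 1: exact match */
		if (id == tvnorms[i].id)
			return i;
	for (i = 0; i < TVNORMS; i++)	/* pass 2: any common bit */
		if (id & tvnorms[i].id)
			return i;
	return -EINVAL;			/* no matching standard */
}
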
@@ -1283,327 +1289,170 @@ static int em28xx_video_do_ioctl(struct inode *inode, struct file *filp,
1283 /* --- capabilities ------------------------------------------ */ 1289 /* --- capabilities ------------------------------------------ */
1284 case VIDIOC_QUERYCAP: 1290 case VIDIOC_QUERYCAP:
1285 { 1291 {
1286 struct v4l2_capability *cap = arg; 1292 struct v4l2_capability *cap = arg;
1287 1293
1288 memset(cap, 0, sizeof(*cap)); 1294 memset(cap, 0, sizeof(*cap));
1289 strlcpy(cap->driver, "em28xx", sizeof(cap->driver)); 1295 strlcpy(cap->driver, "em28xx", sizeof(cap->driver));
1290 strlcpy(cap->card, em28xx_boards[dev->model].name, 1296 strlcpy(cap->card, em28xx_boards[dev->model].name,
1291 sizeof(cap->card)); 1297 sizeof(cap->card));
1292 strlcpy(cap->bus_info, dev->udev->dev.bus_id, 1298 strlcpy(cap->bus_info, dev->udev->dev.bus_id,
1293 sizeof(cap->bus_info)); 1299 sizeof(cap->bus_info));
1294 cap->version = EM28XX_VERSION_CODE; 1300 cap->version = EM28XX_VERSION_CODE;
1295 cap->capabilities = 1301 cap->capabilities =
1296 V4L2_CAP_VIDEO_CAPTURE | 1302 V4L2_CAP_SLICED_VBI_CAPTURE |
1297 V4L2_CAP_AUDIO | 1303 V4L2_CAP_VIDEO_CAPTURE |
1298 V4L2_CAP_READWRITE | V4L2_CAP_STREAMING; 1304 V4L2_CAP_AUDIO |
1299 if (dev->has_tuner) 1305 V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
1300 cap->capabilities |= V4L2_CAP_TUNER; 1306 if (dev->has_tuner)
1301 return 0; 1307 cap->capabilities |= V4L2_CAP_TUNER;
1302 } 1308 return 0;
1303 1309 }
1304 /* --- capture ioctls ---------------------------------------- */ 1310 /* --- capture ioctls ---------------------------------------- */
1305 case VIDIOC_ENUM_FMT: 1311 case VIDIOC_ENUM_FMT:
1306 { 1312 {
1307 struct v4l2_fmtdesc *fmtd = arg; 1313 struct v4l2_fmtdesc *fmtd = arg;
1308
1309 if (fmtd->index != 0)
1310 return -EINVAL;
1311 memset(fmtd, 0, sizeof(*fmtd));
1312 fmtd->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1313 strcpy(fmtd->description, "Packed YUY2");
1314 fmtd->pixelformat = V4L2_PIX_FMT_YUYV;
1315 memset(fmtd->reserved, 0, sizeof(fmtd->reserved));
1316 return 0;
1317 }
1318 1314
1315 if (fmtd->index != 0)
1316 return -EINVAL;
1317 memset(fmtd, 0, sizeof(*fmtd));
1318 fmtd->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1319 strcpy(fmtd->description, "Packed YUY2");
1320 fmtd->pixelformat = V4L2_PIX_FMT_YUYV;
1321 memset(fmtd->reserved, 0, sizeof(fmtd->reserved));
1322 return 0;
1323 }
1319 case VIDIOC_G_FMT: 1324 case VIDIOC_G_FMT:
1320 { 1325 return em28xx_get_fmt(dev, (struct v4l2_format *) arg);
1321 struct v4l2_format *format = arg;
1322
1323 em28xx_videodbg("VIDIOC_G_FMT: type=%s\n",
1324 format->type ==
1325 V4L2_BUF_TYPE_VIDEO_CAPTURE ?
1326 "V4L2_BUF_TYPE_VIDEO_CAPTURE" : format->type ==
1327 V4L2_BUF_TYPE_VBI_CAPTURE ?
1328 "V4L2_BUF_TYPE_VBI_CAPTURE " :
1329 "not supported");
1330
1331 if (format->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1332 return -EINVAL;
1333
1334 format->fmt.pix.width = dev->width;
1335 format->fmt.pix.height = dev->height;
1336 format->fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
1337 format->fmt.pix.bytesperline = dev->bytesperline;
1338 format->fmt.pix.sizeimage = dev->frame_size;
1339 format->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
1340 format->fmt.pix.field = dev->interlaced ? V4L2_FIELD_INTERLACED : V4L2_FIELD_TOP; /* FIXME: TOP? NONE? BOTTOM? ALTERNATE? */
1341
1342 em28xx_videodbg("VIDIOC_G_FMT: %dx%d\n", dev->width,
1343 dev->height);
1344 return 0;
1345 }
1346 1326
1347 case VIDIOC_TRY_FMT: 1327 case VIDIOC_TRY_FMT:
1348 case VIDIOC_S_FMT: 1328 case VIDIOC_S_FMT:
1349 { 1329 return em28xx_set_fmt(dev, cmd, (struct v4l2_format *)arg);
1350 struct v4l2_format *format = arg;
1351 u32 i;
1352 int ret = 0;
1353 int width = format->fmt.pix.width;
1354 int height = format->fmt.pix.height;
1355 unsigned int hscale, vscale;
1356 unsigned int maxh, maxw;
1357
1358 maxw = norm_maxw(dev);
1359 maxh = norm_maxh(dev);
1360
1361/* int both_fields; */
1362
1363 em28xx_videodbg("%s: type=%s\n",
1364 cmd ==
1365 VIDIOC_TRY_FMT ? "VIDIOC_TRY_FMT" :
1366 "VIDIOC_S_FMT",
1367 format->type ==
1368 V4L2_BUF_TYPE_VIDEO_CAPTURE ?
1369 "V4L2_BUF_TYPE_VIDEO_CAPTURE" : format->type ==
1370 V4L2_BUF_TYPE_VBI_CAPTURE ?
1371 "V4L2_BUF_TYPE_VBI_CAPTURE " :
1372 "not supported");
1373
1374 if (format->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1375 return -EINVAL;
1376
1377 em28xx_videodbg("%s: requested %dx%d\n",
1378 cmd ==
1379 VIDIOC_TRY_FMT ? "VIDIOC_TRY_FMT" :
1380 "VIDIOC_S_FMT", format->fmt.pix.width,
1381 format->fmt.pix.height);
1382
1383 /* FIXME: Move some code away from here */
1384 /* width must be even because of the YUYV format */
1385 /* height must be even because of interlacing */
1386 height &= 0xfffe;
1387 width &= 0xfffe;
1388
1389 if (height < 32)
1390 height = 32;
1391 if (height > maxh)
1392 height = maxh;
1393 if (width < 48)
1394 width = 48;
1395 if (width > maxw)
1396 width = maxw;
1397
1398 if(dev->is_em2800){
1399 /* the em2800 can only scale down to 50% */
1400 if(height % (maxh / 2))
1401 height=maxh;
1402 if(width % (maxw / 2))
1403 width=maxw;
1404 /* according to empiatech support */
1405 /* the MaxPacketSize is too small to support */
1406 /* framesizes larger than 640x480 @ 30 fps */
1407 /* or 640x576 @ 25 fps. As this would cut */
1408 /* off a part of the image we prefer */
1409 /* 360x576 or 360x480 for now */
1410 if(width == maxw && height == maxh)
1411 width /= 2;
1412 }
1413
1414 if ((hscale =
1415 (((unsigned long)maxw) << 12) / width - 4096L) >=
1416 0x4000)
1417 hscale = 0x3fff;
1418 width =
1419 (((unsigned long)maxw) << 12) / (hscale + 4096L);
1420
1421 if ((vscale =
1422 (((unsigned long)maxh) << 12) / height - 4096L) >=
1423 0x4000)
1424 vscale = 0x3fff;
1425 height =
1426 (((unsigned long)maxh) << 12) / (vscale + 4096L);
1427
1428 format->fmt.pix.width = width;
1429 format->fmt.pix.height = height;
1430 format->fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
1431 format->fmt.pix.bytesperline = width * 2;
1432 format->fmt.pix.sizeimage = width * 2 * height;
1433 format->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
1434 format->fmt.pix.field = V4L2_FIELD_INTERLACED;
1435
1436 em28xx_videodbg("%s: returned %dx%d (%d, %d)\n",
1437 cmd ==
1438 VIDIOC_TRY_FMT ? "VIDIOC_TRY_FMT" :
1439 "VIDIOC_S_FMT", format->fmt.pix.width,
1440 format->fmt.pix.height, hscale, vscale);
1441
1442 if (cmd == VIDIOC_TRY_FMT)
1443 return 0;
1444
1445 for (i = 0; i < dev->num_frames; i++)
1446 if (dev->frame[i].vma_use_count) {
1447 em28xx_videodbg("VIDIOC_S_FMT failed. "
1448 "Unmap the buffers first.\n");
1449 return -EINVAL;
1450 }
1451 1330
1452 /* stop io in case it is already in progress */ 1331 case VIDIOC_REQBUFS:
1453 if (dev->stream == STREAM_ON) { 1332 {
1454 em28xx_videodbg("VIDIOC_SET_FMT: interrupting stream\n"); 1333 struct v4l2_requestbuffers *rb = arg;
1455 if ((ret = em28xx_stream_interrupt(dev))) 1334 u32 i;
1456 return ret; 1335 int ret;
1457 }
1458 1336
1459 em28xx_release_buffers(dev); 1337 if (rb->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
1460 dev->io = IO_NONE; 1338 rb->memory != V4L2_MEMORY_MMAP)
1461 1339 return -EINVAL;
1462 /* set new image size */
1463 dev->width = width;
1464 dev->height = height;
1465 dev->frame_size = dev->width * dev->height * 2;
1466 dev->field_size = dev->frame_size >> 1; /*both_fileds ? dev->frame_size>>1 : dev->frame_size; */
1467 dev->bytesperline = dev->width * 2;
1468 dev->hscale = hscale;
1469 dev->vscale = vscale;
1470/* dev->both_fileds = both_fileds; */
1471 em28xx_uninit_isoc(dev);
1472 em28xx_set_alternate(dev);
1473 em28xx_capture_start(dev, 1);
1474 em28xx_resolution_set(dev);
1475 em28xx_init_isoc(dev);
1476 1340
1477 return 0; 1341 if (dev->io == IO_READ) {
1342 em28xx_videodbg ("method is set to read;"
1343 " close and open the device again to"
1344 " choose the mmap I/O method\n");
1345 return -EINVAL;
1478 } 1346 }
1479 1347
1480 /* --- streaming capture ------------------------------------- */ 1348 for (i = 0; i < dev->num_frames; i++)
1481 case VIDIOC_REQBUFS: 1349 if (dev->frame[i].vma_use_count) {
1482 { 1350 em28xx_videodbg ("VIDIOC_REQBUFS failed; previous buffers are still mapped\n");
1483 struct v4l2_requestbuffers *rb = arg;
1484 u32 i;
1485 int ret;
1486
1487 if (rb->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
1488 rb->memory != V4L2_MEMORY_MMAP)
1489 return -EINVAL;
1490
1491 if (dev->io == IO_READ) {
1492 em28xx_videodbg ("method is set to read;"
1493 " close and open the device again to"
1494 " choose the mmap I/O method\n");
1495 return -EINVAL; 1351 return -EINVAL;
1496 } 1352 }
1497 1353
1498 for (i = 0; i < dev->num_frames; i++) 1354 if (dev->stream == STREAM_ON) {
1499 if (dev->frame[i].vma_use_count) { 1355 em28xx_videodbg("VIDIOC_REQBUFS: interrupting stream\n");
1500 em28xx_videodbg ("VIDIOC_REQBUFS failed; previous buffers are still mapped\n"); 1356 if ((ret = em28xx_stream_interrupt(dev)))
1501 return -EINVAL; 1357 return ret;
1502 } 1358 }
1503
1504 if (dev->stream == STREAM_ON) {
1505 em28xx_videodbg("VIDIOC_REQBUFS: interrupting stream\n");
1506 if ((ret = em28xx_stream_interrupt(dev)))
1507 return ret;
1508 }
1509
1510 em28xx_empty_framequeues(dev);
1511 1359
1512 em28xx_release_buffers(dev); 1360 em28xx_empty_framequeues(dev);
1513 if (rb->count)
1514 rb->count =
1515 em28xx_request_buffers(dev, rb->count);
1516 1361
1517 dev->frame_current = NULL; 1362 em28xx_release_buffers(dev);
1363 if (rb->count)
1364 rb->count =
1365 em28xx_request_buffers(dev, rb->count);
1518 1366
1519 em28xx_videodbg ("VIDIOC_REQBUFS: setting io method to mmap: num bufs %i\n", 1367 dev->frame_current = NULL;
1520 rb->count);
1521 dev->io = rb->count ? IO_MMAP : IO_NONE;
1522 return 0;
1523 }
1524 1368
1369 em28xx_videodbg ("VIDIOC_REQBUFS: setting io method to mmap: num bufs %i\n",
1370 rb->count);
1371 dev->io = rb->count ? IO_MMAP : IO_NONE;
1372 return 0;
1373 }
1525 case VIDIOC_QUERYBUF: 1374 case VIDIOC_QUERYBUF:
1526 { 1375 {
1527 struct v4l2_buffer *b = arg; 1376 struct v4l2_buffer *b = arg;
1528 1377
1529 if (b->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || 1378 if (b->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
1530 b->index >= dev->num_frames || dev->io != IO_MMAP) 1379 b->index >= dev->num_frames || dev->io != IO_MMAP)
1531 return -EINVAL; 1380 return -EINVAL;
1532 1381
1533 memcpy(b, &dev->frame[b->index].buf, sizeof(*b)); 1382 memcpy(b, &dev->frame[b->index].buf, sizeof(*b));
1534 1383
1535 if (dev->frame[b->index].vma_use_count) { 1384 if (dev->frame[b->index].vma_use_count) {
1536 b->flags |= V4L2_BUF_FLAG_MAPPED; 1385 b->flags |= V4L2_BUF_FLAG_MAPPED;
1537 }
1538 if (dev->frame[b->index].state == F_DONE)
1539 b->flags |= V4L2_BUF_FLAG_DONE;
1540 else if (dev->frame[b->index].state != F_UNUSED)
1541 b->flags |= V4L2_BUF_FLAG_QUEUED;
1542 return 0;
1543 } 1386 }
1387 if (dev->frame[b->index].state == F_DONE)
1388 b->flags |= V4L2_BUF_FLAG_DONE;
1389 else if (dev->frame[b->index].state != F_UNUSED)
1390 b->flags |= V4L2_BUF_FLAG_QUEUED;
1391 return 0;
1392 }
1544 case VIDIOC_QBUF: 1393 case VIDIOC_QBUF:
1545 { 1394 {
1546 struct v4l2_buffer *b = arg; 1395 struct v4l2_buffer *b = arg;
1547 unsigned long lock_flags; 1396 unsigned long lock_flags;
1548 1397
1549 if (b->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || 1398 if (b->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
1550 b->index >= dev->num_frames || dev->io != IO_MMAP) { 1399 b->index >= dev->num_frames || dev->io != IO_MMAP) {
1551 return -EINVAL; 1400 return -EINVAL;
1552 } 1401 }
1553 1402
1554 if (dev->frame[b->index].state != F_UNUSED) { 1403 if (dev->frame[b->index].state != F_UNUSED) {
1555 return -EAGAIN; 1404 return -EAGAIN;
1556 } 1405 }
1557 dev->frame[b->index].state = F_QUEUED; 1406 dev->frame[b->index].state = F_QUEUED;
1558 1407
1559 /* add frame to fifo */ 1408 /* add frame to fifo */
1560 spin_lock_irqsave(&dev->queue_lock, lock_flags); 1409 spin_lock_irqsave(&dev->queue_lock, lock_flags);
1561 list_add_tail(&dev->frame[b->index].frame, 1410 list_add_tail(&dev->frame[b->index].frame,
1562 &dev->inqueue); 1411 &dev->inqueue);
1563 spin_unlock_irqrestore(&dev->queue_lock, lock_flags); 1412 spin_unlock_irqrestore(&dev->queue_lock, lock_flags);
1564 1413
1565 return 0; 1414 return 0;
1566 } 1415 }
1567 case VIDIOC_DQBUF: 1416 case VIDIOC_DQBUF:
1568 { 1417 {
1569 struct v4l2_buffer *b = arg; 1418 struct v4l2_buffer *b = arg;
1570 struct em28xx_frame_t *f; 1419 struct em28xx_frame_t *f;
1571 unsigned long lock_flags; 1420 unsigned long lock_flags;
1572 int ret = 0; 1421 int ret = 0;
1573 1422
1574 if (b->type != V4L2_BUF_TYPE_VIDEO_CAPTURE 1423 if (b->type != V4L2_BUF_TYPE_VIDEO_CAPTURE
1575 || dev->io != IO_MMAP) 1424 || dev->io != IO_MMAP)
1576 return -EINVAL; 1425 return -EINVAL;
1577 1426
1578 if (list_empty(&dev->outqueue)) { 1427 if (list_empty(&dev->outqueue)) {
1579 if (dev->stream == STREAM_OFF) 1428 if (dev->stream == STREAM_OFF)
1580 return -EINVAL; 1429 return -EINVAL;
1581 if (filp->f_flags & O_NONBLOCK) 1430 if (filp->f_flags & O_NONBLOCK)
1582 return -EAGAIN; 1431 return -EAGAIN;
1583 ret = wait_event_interruptible 1432 ret = wait_event_interruptible
1584 (dev->wait_frame, 1433 (dev->wait_frame,
1585 (!list_empty(&dev->outqueue)) || 1434 (!list_empty(&dev->outqueue)) ||
1586 (dev->state & DEV_DISCONNECTED)); 1435 (dev->state & DEV_DISCONNECTED));
1587 if (ret) 1436 if (ret)
1588 return ret; 1437 return ret;
1589 if (dev->state & DEV_DISCONNECTED) 1438 if (dev->state & DEV_DISCONNECTED)
1590 return -ENODEV; 1439 return -ENODEV;
1591 } 1440 }
1592 1441
1593 spin_lock_irqsave(&dev->queue_lock, lock_flags); 1442 spin_lock_irqsave(&dev->queue_lock, lock_flags);
1594 f = list_entry(dev->outqueue.next, 1443 f = list_entry(dev->outqueue.next,
1595 struct em28xx_frame_t, frame); 1444 struct em28xx_frame_t, frame);
1596 list_del(dev->outqueue.next); 1445 list_del(dev->outqueue.next);
1597 spin_unlock_irqrestore(&dev->queue_lock, lock_flags); 1446 spin_unlock_irqrestore(&dev->queue_lock, lock_flags);
1598 1447
1599 f->state = F_UNUSED; 1448 f->state = F_UNUSED;
1600 memcpy(b, &f->buf, sizeof(*b)); 1449 memcpy(b, &f->buf, sizeof(*b));
1601 1450
1602 if (f->vma_use_count) 1451 if (f->vma_use_count)
1603 b->flags |= V4L2_BUF_FLAG_MAPPED; 1452 b->flags |= V4L2_BUF_FLAG_MAPPED;
1604 1453
1605 return 0; 1454 return 0;
1606 } 1455 }
1607 default: 1456 default:
1608 return em28xx_do_ioctl(inode, filp, dev, cmd, arg, 1457 return em28xx_do_ioctl(inode, filp, dev, cmd, arg,
1609 em28xx_video_do_ioctl); 1458 em28xx_video_do_ioctl);
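
From user space, the streaming path handled above is driven in the usual V4L2 order (REQBUFS, QUERYBUF, mmap, QBUF, STREAMON, DQBUF). A minimal, error-handling-free sketch of a client exercising these ioctls, for illustration only:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

int main(void)
{
	int fd = open("/dev/video0", O_RDWR);
	int type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	struct v4l2_requestbuffers rb;
	struct v4l2_buffer b;
	void *mem;

	memset(&rb, 0, sizeof(rb));
	rb.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	rb.memory = V4L2_MEMORY_MMAP;
	rb.count = 4;
	ioctl(fd, VIDIOC_REQBUFS, &rb);		/* driver switches to IO_MMAP */

	memset(&b, 0, sizeof(b));
	b.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	b.memory = V4L2_MEMORY_MMAP;
	b.index = 0;
	ioctl(fd, VIDIOC_QUERYBUF, &b);		/* offset/length for mmap() */
	mem = mmap(NULL, b.length, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, b.m.offset);

	ioctl(fd, VIDIOC_QBUF, &b);		/* frame goes onto dev->inqueue */
	ioctl(fd, VIDIOC_STREAMON, &type);
	ioctl(fd, VIDIOC_DQBUF, &b);		/* returns once a frame is on outqueue */

	munmap(mem, b.length);
	return 0;
}
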
@@ -1621,25 +1470,25 @@ static int em28xx_v4l2_ioctl(struct inode *inode, struct file *filp,
1621 int ret = 0; 1470 int ret = 0;
1622 struct em28xx *dev = filp->private_data; 1471 struct em28xx *dev = filp->private_data;
1623 1472
1624 if (down_interruptible(&dev->fileop_lock)) 1473 if (mutex_lock_interruptible(&dev->fileop_lock))
1625 return -ERESTARTSYS; 1474 return -ERESTARTSYS;
1626 1475
1627 if (dev->state & DEV_DISCONNECTED) { 1476 if (dev->state & DEV_DISCONNECTED) {
1628 em28xx_errdev("v4l2 ioctl: device not present\n"); 1477 em28xx_errdev("v4l2 ioctl: device not present\n");
1629 up(&dev->fileop_lock); 1478 mutex_unlock(&dev->fileop_lock);
1630 return -ENODEV; 1479 return -ENODEV;
1631 } 1480 }
1632 1481
1633 if (dev->state & DEV_MISCONFIGURED) { 1482 if (dev->state & DEV_MISCONFIGURED) {
1634 em28xx_errdev 1483 em28xx_errdev
1635 ("v4l2 ioctl: device is misconfigured; close and open it again\n"); 1484 ("v4l2 ioctl: device is misconfigured; close and open it again\n");
1636 up(&dev->fileop_lock); 1485 mutex_unlock(&dev->fileop_lock);
1637 return -EIO; 1486 return -EIO;
1638 } 1487 }
1639 1488
1640 ret = video_usercopy(inode, filp, cmd, arg, em28xx_video_do_ioctl); 1489 ret = video_usercopy(inode, filp, cmd, arg, em28xx_video_do_ioctl);
1641 1490
1642 up(&dev->fileop_lock); 1491 mutex_unlock(&dev->fileop_lock);
1643 1492
1644 return ret; 1493 return ret;
1645} 1494}
@@ -1673,7 +1522,7 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
1673 1522
1674 dev->udev = udev; 1523 dev->udev = udev;
1675 dev->model = model; 1524 dev->model = model;
1676 init_MUTEX(&dev->lock); 1525 mutex_init(&dev->lock);
1677 init_waitqueue_head(&dev->open); 1526 init_waitqueue_head(&dev->open);
1678 1527
1679 dev->em28xx_write_regs = em28xx_write_regs; 1528 dev->em28xx_write_regs = em28xx_write_regs;
@@ -1729,10 +1578,11 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
1729 dev->vpic.depth = 16; 1578 dev->vpic.depth = 16;
1730 dev->vpic.palette = VIDEO_PALETTE_YUV422; 1579 dev->vpic.palette = VIDEO_PALETTE_YUV422;
1731 1580
1581 em28xx_pre_card_setup(dev);
1732#ifdef CONFIG_MODULES 1582#ifdef CONFIG_MODULES
1733 /* request some modules */ 1583 /* request some modules */
1734 if (dev->decoder == EM28XX_SAA7113 || dev->decoder == EM28XX_SAA7114) 1584 if (dev->decoder == EM28XX_SAA7113 || dev->decoder == EM28XX_SAA7114)
1735 request_module("saa711x"); 1585 request_module("saa7115");
1736 if (dev->decoder == EM28XX_TVP5150) 1586 if (dev->decoder == EM28XX_TVP5150)
1737 request_module("tvp5150"); 1587 request_module("tvp5150");
1738 if (dev->has_tuner) 1588 if (dev->has_tuner)
@@ -1744,10 +1594,11 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
1744 if (errCode) { 1594 if (errCode) {
1745 em28xx_errdev("error configuring device\n"); 1595 em28xx_errdev("error configuring device\n");
1746 kfree(dev); 1596 kfree(dev);
1597 em28xx_devused&=~(1<<dev->devno);
1747 return -ENOMEM; 1598 return -ENOMEM;
1748 } 1599 }
1749 1600
1750 down(&dev->lock); 1601 mutex_lock(&dev->lock);
1751 /* register i2c bus */ 1602 /* register i2c bus */
1752 em28xx_i2c_register(dev); 1603 em28xx_i2c_register(dev);
1753 1604
@@ -1757,7 +1608,7 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
1757 /* configure the device */ 1608 /* configure the device */
1758 em28xx_config_i2c(dev); 1609 em28xx_config_i2c(dev);
1759 1610
1760 up(&dev->lock); 1611 mutex_unlock(&dev->lock);
1761 1612
1762 errCode = em28xx_config(dev); 1613 errCode = em28xx_config(dev);
1763 1614
@@ -1770,9 +1621,30 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
1770 if (NULL == dev->vdev) { 1621 if (NULL == dev->vdev) {
1771 em28xx_errdev("cannot allocate video_device.\n"); 1622 em28xx_errdev("cannot allocate video_device.\n");
1772 kfree(dev); 1623 kfree(dev);
1624 em28xx_devused&=~(1<<dev->devno);
1773 return -ENOMEM; 1625 return -ENOMEM;
1774 } 1626 }
1775 1627
1628 dev->vbi_dev = video_device_alloc();
1629 if (NULL == dev->vbi_dev) {
1630 em28xx_errdev("cannot allocate video_device.\n");
1631 kfree(dev->vdev);
1632 kfree(dev);
1633 em28xx_devused&=~(1<<dev->devno);
1634 return -ENOMEM;
1635 }
1636
1637 /* Fills VBI device info */
1638 dev->vbi_dev->type = VFL_TYPE_VBI;
1639 dev->vbi_dev->hardware = 0;
1640 dev->vbi_dev->fops = &em28xx_v4l_fops;
1641 dev->vbi_dev->minor = -1;
1642 dev->vbi_dev->dev = &dev->udev->dev;
1643 dev->vbi_dev->release = video_device_release;
1644 snprintf(dev->vbi_dev->name, sizeof(dev->vbi_dev->name), "%s#%d %s",
1645 "em28xx",dev->devno,"vbi");
1646
1647 /* Fills CAPTURE device info */
1776 dev->vdev->type = VID_TYPE_CAPTURE; 1648 dev->vdev->type = VID_TYPE_CAPTURE;
1777 if (dev->has_tuner) 1649 if (dev->has_tuner)
1778 dev->vdev->type |= VID_TYPE_TUNER; 1650 dev->vdev->type |= VID_TYPE_TUNER;
@@ -1781,21 +1653,39 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
1781 dev->vdev->minor = -1; 1653 dev->vdev->minor = -1;
1782 dev->vdev->dev = &dev->udev->dev; 1654 dev->vdev->dev = &dev->udev->dev;
1783 dev->vdev->release = video_device_release; 1655 dev->vdev->release = video_device_release;
1784 snprintf(dev->vdev->name, sizeof(dev->vdev->name), "%s", 1656 snprintf(dev->vdev->name, sizeof(dev->vdev->name), "%s#%d %s",
1785 "em28xx video"); 1657 "em28xx",dev->devno,"video");
1658
1786 list_add_tail(&dev->devlist,&em28xx_devlist); 1659 list_add_tail(&dev->devlist,&em28xx_devlist);
1787 1660
1788 /* register v4l2 device */ 1661 /* register v4l2 device */
1789 down(&dev->lock); 1662 mutex_lock(&dev->lock);
1790 if ((retval = video_register_device(dev->vdev, VFL_TYPE_GRABBER, -1))) { 1663 if ((retval = video_register_device(dev->vdev, VFL_TYPE_GRABBER,
1664 video_nr[dev->devno]))) {
1791 em28xx_errdev("unable to register video device (error=%i).\n", 1665 em28xx_errdev("unable to register video device (error=%i).\n",
1792 retval); 1666 retval);
1793 up(&dev->lock); 1667 mutex_unlock(&dev->lock);
1794 list_del(&dev->devlist); 1668 list_del(&dev->devlist);
1795 video_device_release(dev->vdev); 1669 video_device_release(dev->vdev);
1796 kfree(dev); 1670 kfree(dev);
1671 em28xx_devused&=~(1<<dev->devno);
1797 return -ENODEV; 1672 return -ENODEV;
1798 } 1673 }
1674
1675 if (video_register_device(dev->vbi_dev, VFL_TYPE_VBI,
1676 vbi_nr[dev->devno]) < 0) {
1677 printk("unable to register vbi device\n");
1678 mutex_unlock(&dev->lock);
1679 list_del(&dev->devlist);
1680 video_device_release(dev->vbi_dev);
1681 video_device_release(dev->vdev);
1682 kfree(dev);
1683 em28xx_devused&=~(1<<dev->devno);
1684 return -ENODEV;
1685 } else {
1686 printk("registered VBI\n");
1687 }
1688
1799 if (dev->has_msp34xx) { 1689 if (dev->has_msp34xx) {
1800 /* Send a reset to other chips via gpio */ 1690 /* Send a reset to other chips via gpio */
1801 em28xx_write_regs_req(dev, 0x00, 0x08, "\xf7", 1); 1691 em28xx_write_regs_req(dev, 0x00, 0x08, "\xf7", 1);
@@ -1806,10 +1696,11 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
1806 } 1696 }
1807 video_mux(dev, 0); 1697 video_mux(dev, 0);
1808 1698
1809 up(&dev->lock); 1699 mutex_unlock(&dev->lock);
1810 1700
1811 em28xx_info("V4L2 device registered as /dev/video%d\n", 1701 em28xx_info("V4L2 device registered as /dev/video%d and /dev/vbi%d\n",
1812 dev->vdev->minor); 1702 dev->vdev->minor-MINOR_VFL_TYPE_GRABBER_MIN,
1703 dev->vbi_dev->minor-MINOR_VFL_TYPE_VBI_MIN);
1813 1704
1814 return 0; 1705 return 0;
1815} 1706}
@@ -1831,6 +1722,9 @@ static int em28xx_usb_probe(struct usb_interface *interface,
1831 udev = usb_get_dev(interface_to_usbdev(interface)); 1722 udev = usb_get_dev(interface_to_usbdev(interface));
1832 ifnum = interface->altsetting[0].desc.bInterfaceNumber; 1723 ifnum = interface->altsetting[0].desc.bInterfaceNumber;
1833 1724
1725 /* Check to see next free device and mark as used */
1726 nr=find_first_zero_bit(&em28xx_devused,EM28XX_MAXBOARDS);
1727 em28xx_devused|=1<<nr;
1834 1728
1835 /* Don't register audio interfaces */ 1729 /* Don't register audio interfaces */
1836 if (interface->altsetting[0].desc.bInterfaceClass == USB_CLASS_AUDIO) { 1730 if (interface->altsetting[0].desc.bInterfaceClass == USB_CLASS_AUDIO) {
@@ -1838,6 +1732,8 @@ static int em28xx_usb_probe(struct usb_interface *interface,
1838 udev->descriptor.idVendor,udev->descriptor.idProduct, 1732 udev->descriptor.idVendor,udev->descriptor.idProduct,
1839 ifnum, 1733 ifnum,
1840 interface->altsetting[0].desc.bInterfaceClass); 1734 interface->altsetting[0].desc.bInterfaceClass);
1735
1736 em28xx_devused&=~(1<<nr);
1841 return -ENODEV; 1737 return -ENODEV;
1842 } 1738 }
1843 1739
@@ -1852,18 +1748,20 @@ static int em28xx_usb_probe(struct usb_interface *interface,
1852 if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != 1748 if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) !=
1853 USB_ENDPOINT_XFER_ISOC) { 1749 USB_ENDPOINT_XFER_ISOC) {
1854 em28xx_err(DRIVER_NAME " probing error: endpoint is non-ISO endpoint!\n"); 1750 em28xx_err(DRIVER_NAME " probing error: endpoint is non-ISO endpoint!\n");
1751 em28xx_devused&=~(1<<nr);
1855 return -ENODEV; 1752 return -ENODEV;
1856 } 1753 }
1857 if ((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT) { 1754 if ((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT) {
1858 em28xx_err(DRIVER_NAME " probing error: endpoint is ISO OUT endpoint!\n"); 1755 em28xx_err(DRIVER_NAME " probing error: endpoint is ISO OUT endpoint!\n");
1756 em28xx_devused&=~(1<<nr);
1859 return -ENODEV; 1757 return -ENODEV;
1860 } 1758 }
1861 1759
1862 model=id->driver_info; 1760 model=id->driver_info;
1863 nr=interface->minor;
1864 1761
1865 if (nr>EM28XX_MAXBOARDS) { 1762 if (nr >= EM28XX_MAXBOARDS) {
1866 printk (DRIVER_NAME ": Supports only %i em28xx boards.\n",EM28XX_MAXBOARDS); 1763 printk (DRIVER_NAME ": Supports only %i em28xx boards.\n",EM28XX_MAXBOARDS);
1764 em28xx_devused&=~(1<<nr);
1867 return -ENOMEM; 1765 return -ENOMEM;
1868 } 1766 }
1869 1767
@@ -1871,19 +1769,24 @@ static int em28xx_usb_probe(struct usb_interface *interface,
1871 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 1769 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1872 if (dev == NULL) { 1770 if (dev == NULL) {
1873 em28xx_err(DRIVER_NAME ": out of memory!\n"); 1771 em28xx_err(DRIVER_NAME ": out of memory!\n");
1772 em28xx_devused&=~(1<<nr);
1874 return -ENOMEM; 1773 return -ENOMEM;
1875 } 1774 }
1876 1775
1776 snprintf(dev->name, 29, "em28xx #%d", nr);
1777 dev->devno=nr;
1778
1877 /* compute alternate max packet sizes */ 1779 /* compute alternate max packet sizes */
1878 uif = udev->actconfig->interface[0]; 1780 uif = udev->actconfig->interface[0];
1879 1781
1880 dev->num_alt=uif->num_altsetting; 1782 dev->num_alt=uif->num_altsetting;
1881 printk(DRIVER_NAME ": Alternate settings: %i\n",dev->num_alt); 1783 em28xx_info("Alternate settings: %i\n",dev->num_alt);
1882// dev->alt_max_pkt_size = kmalloc(sizeof(*dev->alt_max_pkt_size)* 1784// dev->alt_max_pkt_size = kmalloc(sizeof(*dev->alt_max_pkt_size)*
1883 dev->alt_max_pkt_size = kmalloc(32* 1785 dev->alt_max_pkt_size = kmalloc(32*
1884 dev->num_alt,GFP_KERNEL); 1786 dev->num_alt,GFP_KERNEL);
1885 if (dev->alt_max_pkt_size == NULL) { 1787 if (dev->alt_max_pkt_size == NULL) {
1886 em28xx_err(DRIVER_NAME ": out of memory!\n"); 1788 em28xx_errdev("out of memory!\n");
1789 em28xx_devused&=~(1<<nr);
1887 return -ENOMEM; 1790 return -ENOMEM;
1888 } 1791 }
1889 1792
@@ -1892,27 +1795,26 @@ static int em28xx_usb_probe(struct usb_interface *interface,
1892 wMaxPacketSize); 1795 wMaxPacketSize);
1893 dev->alt_max_pkt_size[i] = 1796 dev->alt_max_pkt_size[i] =
1894 (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1); 1797 (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
1895 printk(DRIVER_NAME ": Alternate setting %i, max size= %i\n",i, 1798 em28xx_info("Alternate setting %i, max size= %i\n",i,
1896 dev->alt_max_pkt_size[i]); 1799 dev->alt_max_pkt_size[i]);
1897 } 1800 }
1898 1801
1899 snprintf(dev->name, 29, "em28xx #%d", nr);
1900
1901 if ((card[nr]>=0)&&(card[nr]<em28xx_bcount)) 1802 if ((card[nr]>=0)&&(card[nr]<em28xx_bcount))
1902 model=card[nr]; 1803 model=card[nr];
1903 1804
1904 if ((model==EM2800_BOARD_UNKNOWN)||(model==EM2820_BOARD_UNKNOWN)) { 1805 if ((model==EM2800_BOARD_UNKNOWN)||(model==EM2820_BOARD_UNKNOWN)) {
1905 printk( "%s: Your board has no eeprom inside it and thus can't\n" 1806 em28xx_errdev( "Your board has no eeprom inside it and thus can't\n"
1906 "%s: be autodetected. Please pass card=<n> insmod option to\n" 1807 "%s: be autodetected. Please pass card=<n> insmod option to\n"
1907 "%s: workaround that. Redirect complaints to the vendor of\n" 1808 "%s: workaround that. Redirect complaints to the vendor of\n"
1908 "%s: the TV card. Best regards,\n" 1809 "%s: the TV card. Generic type will be used."
1810 "%s: Best regards,\n"
1909 "%s: -- tux\n", 1811 "%s: -- tux\n",
1910 dev->name,dev->name,dev->name,dev->name,dev->name); 1812 dev->name,dev->name,dev->name,dev->name,dev->name);
1911 printk("%s: Here is a list of valid choices for the card=<n> insmod option:\n", 1813 em28xx_errdev("%s: Here is a list of valid choices for the card=<n> insmod option:\n",
1912 dev->name); 1814 dev->name);
1913 for (i = 0; i < em28xx_bcount; i++) { 1815 for (i = 0; i < em28xx_bcount; i++) {
1914 printk("%s: card=%d -> %s\n", 1816 em28xx_errdev(" card=%d -> %s\n", i,
1915 dev->name, i, em28xx_boards[i].name); 1817 em28xx_boards[i].name);
1916 } 1818 }
1917 } 1819 }
1918 1820
@@ -1938,15 +1840,12 @@ static void em28xx_usb_disconnect(struct usb_interface *interface)
1938 struct em28xx *dev = usb_get_intfdata(interface); 1840 struct em28xx *dev = usb_get_intfdata(interface);
1939 usb_set_intfdata(interface, NULL); 1841 usb_set_intfdata(interface, NULL);
1940 1842
1941/*FIXME: IR should be disconnected */
1942
1943 if (!dev) 1843 if (!dev)
1944 return; 1844 return;
1945 1845
1946
1947 down_write(&em28xx_disconnect); 1846 down_write(&em28xx_disconnect);
1948 1847
1949 down(&dev->lock); 1848 mutex_lock(&dev->lock);
1950 1849
1951 em28xx_info("disconnecting %s\n", dev->vdev->name); 1850 em28xx_info("disconnecting %s\n", dev->vdev->name);
1952 1851
@@ -1955,7 +1854,9 @@ static void em28xx_usb_disconnect(struct usb_interface *interface)
1955 if (dev->users) { 1854 if (dev->users) {
1956 em28xx_warn 1855 em28xx_warn
1957 ("device /dev/video%d is open! Deregistration and memory " 1856 ("device /dev/video%d is open! Deregistration and memory "
1958 "deallocation are deferred on close.\n", dev->vdev->minor); 1857 "deallocation are deferred on close.\n",
1858 dev->vdev->minor-MINOR_VFL_TYPE_GRABBER_MIN);
1859
1959 dev->state |= DEV_MISCONFIGURED; 1860 dev->state |= DEV_MISCONFIGURED;
1960 em28xx_uninit_isoc(dev); 1861 em28xx_uninit_isoc(dev);
1961 dev->state |= DEV_DISCONNECTED; 1862 dev->state |= DEV_DISCONNECTED;
@@ -1966,7 +1867,7 @@ static void em28xx_usb_disconnect(struct usb_interface *interface)
1966 em28xx_release_resources(dev); 1867 em28xx_release_resources(dev);
1967 } 1868 }
1968 1869
1969 up(&dev->lock); 1870 mutex_unlock(&dev->lock);
1970 1871
1971 if (!dev->users) { 1872 if (!dev->users) {
1972 kfree(dev->alt_max_pkt_size); 1873 kfree(dev->alt_max_pkt_size);
diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h
index 33de9d846af5..e1ddc2f27a21 100644
--- a/drivers/media/video/em28xx/em28xx.h
+++ b/drivers/media/video/em28xx/em28xx.h
@@ -27,6 +27,7 @@
27 27
28#include <linux/videodev.h> 28#include <linux/videodev.h>
29#include <linux/i2c.h> 29#include <linux/i2c.h>
30#include <linux/mutex.h>
30#include <media/ir-kbd-i2c.h> 31#include <media/ir-kbd-i2c.h>
31 32
32/* Boards supported by driver */ 33/* Boards supported by driver */
@@ -41,6 +42,10 @@
41#define EM2800_BOARD_LEADTEK_WINFAST_USBII 7 42#define EM2800_BOARD_LEADTEK_WINFAST_USBII 7
42#define EM2800_BOARD_KWORLD_USB2800 8 43#define EM2800_BOARD_KWORLD_USB2800 8
43#define EM2820_BOARD_PINNACLE_DVC_90 9 44#define EM2820_BOARD_PINNACLE_DVC_90 9
45#define EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900 10
46#define EM2880_BOARD_TERRATEC_HYBRID_XS 11
47#define EM2820_BOARD_KWORLD_PVRTV2800RF 12
48#define EM2880_BOARD_TERRATEC_PRODIGY_XS 13
44 49
45#define UNSET -1 50#define UNSET -1
46 51
@@ -209,6 +214,7 @@ struct em28xx {
209 /* generic device properties */ 214 /* generic device properties */
210 char name[30]; /* name (including minor) of the device */ 215 char name[30]; /* name (including minor) of the device */
211 int model; /* index in the device_data struct */ 216 int model; /* index in the device_data struct */
217 int devno; /* marks the number of this device */
212 unsigned int is_em2800; 218 unsigned int is_em2800;
213 int video_inputs; /* number of video inputs */ 219 int video_inputs; /* number of video inputs */
214 struct list_head devlist; 220 struct list_head devlist;
@@ -256,7 +262,7 @@ struct em28xx {
256 enum em28xx_stream_state stream; 262 enum em28xx_stream_state stream;
257 enum em28xx_io_method io; 263 enum em28xx_io_method io;
258 /* locks */ 264 /* locks */
259 struct semaphore lock, fileop_lock; 265 struct mutex lock, fileop_lock;
260 spinlock_t queue_lock; 266 spinlock_t queue_lock;
261 struct list_head inqueue, outqueue; 267 struct list_head inqueue, outqueue;
262 wait_queue_head_t open, wait_frame, wait_stream; 268 wait_queue_head_t open, wait_frame, wait_stream;
@@ -326,6 +332,7 @@ int em28xx_set_alternate(struct em28xx *dev);
326 332
327/* Provided by em28xx-cards.c */ 333/* Provided by em28xx-cards.c */
328extern int em2800_variant_detect(struct usb_device* udev,int model); 334extern int em2800_variant_detect(struct usb_device* udev,int model);
335extern void em28xx_pre_card_setup(struct em28xx *dev);
329extern void em28xx_card_setup(struct em28xx *dev); 336extern void em28xx_card_setup(struct em28xx *dev);
330extern struct em28xx_board em28xx_boards[]; 337extern struct em28xx_board em28xx_boards[];
331extern struct usb_device_id em28xx_id_table[]; 338extern struct usb_device_id em28xx_id_table[];
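The em28xx hunks above drop the interface->minor based board numbering in favour of a free-slot bitmask (em28xx_devused): probe claims the first clear bit and every error path clears it again before returning. A minimal sketch of that allocation pattern, with hypothetical names (slot_mask, MAX_SLOTS, claim_slot) rather than the driver's actual code:

	#include <linux/bitops.h>	/* find_first_zero_bit() */
	#include <linux/errno.h>

	#define MAX_SLOTS 4
	static unsigned long slot_mask;		/* bit n set => slot n in use */

	static int claim_slot(void)
	{
		int nr = find_first_zero_bit(&slot_mask, MAX_SLOTS);

		if (nr >= MAX_SLOTS)
			return -ENODEV;		/* every slot already taken */
		slot_mask |= 1UL << nr;		/* mark as used, as probe does */
		return nr;
	}

	static void release_slot(int nr)
	{
		slot_mask &= ~(1UL << nr);	/* undo on any failure path */
	}

In the driver itself these steps are inlined in em28xx_usb_probe() and its error paths rather than factored out like this.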
diff --git a/drivers/media/video/hexium_gemini.c b/drivers/media/video/hexium_gemini.c
index e7bbeb11553d..c7fed3405655 100644
--- a/drivers/media/video/hexium_gemini.c
+++ b/drivers/media/video/hexium_gemini.c
@@ -1,9 +1,9 @@
1/* 1/*
2 hexium_gemini.c - v4l2 driver for Hexium Gemini frame grabber cards 2 hexium_gemini.c - v4l2 driver for Hexium Gemini frame grabber cards
3 3
4 Visit http://www.mihu.de/linux/saa7146/ and follow the link 4 Visit http://www.mihu.de/linux/saa7146/ and follow the link
5 to "hexium" for further details about this card. 5 to "hexium" for further details about this card.
6 6
7 Copyright (C) 2003 Michael Hunold <michael@mihu.de> 7 Copyright (C) 2003 Michael Hunold <michael@mihu.de>
8 8
9 This program is free software; you can redistribute it and/or modify 9 This program is free software; you can redistribute it and/or modify
@@ -81,7 +81,7 @@ struct hexium
81 81
82 struct video_device *video_dev; 82 struct video_device *video_dev;
83 struct i2c_adapter i2c_adapter; 83 struct i2c_adapter i2c_adapter;
84 84
85 int cur_input; /* current input */ 85 int cur_input; /* current input */
86 v4l2_std_id cur_std; /* current standard */ 86 v4l2_std_id cur_std; /* current standard */
87 int cur_bw; /* current black/white status */ 87 int cur_bw; /* current black/white status */
@@ -174,7 +174,7 @@ static struct saa7146_standard hexium_standards[] = {
174 .h_offset = 1, .h_pixels = 720, 174 .h_offset = 1, .h_pixels = 720,
175 .v_max_out = 576, .h_max_out = 768, 175 .v_max_out = 576, .h_max_out = 768,
176 } 176 }
177}; 177};
178 178
179/* bring hardware to a sane state. this has to be done, just in case someone 179/* bring hardware to a sane state. this has to be done, just in case someone
180 wants to capture from this device before it has been properly initialized. 180 wants to capture from this device before it has been properly initialized.
@@ -311,7 +311,7 @@ static int hexium_ioctl(struct saa7146_fh *fh, unsigned int cmd, void *arg)
311 struct saa7146_dev *dev = fh->dev; 311 struct saa7146_dev *dev = fh->dev;
312 struct hexium *hexium = (struct hexium *) dev->ext_priv; 312 struct hexium *hexium = (struct hexium *) dev->ext_priv;
313/* 313/*
314 struct saa7146_vv *vv = dev->vv_data; 314 struct saa7146_vv *vv = dev->vv_data;
315*/ 315*/
316 switch (cmd) { 316 switch (cmd) {
317 case VIDIOC_ENUMINPUT: 317 case VIDIOC_ENUMINPUT:
diff --git a/drivers/media/video/hexium_orion.c b/drivers/media/video/hexium_orion.c
index aad4a18aafd6..137c4736da04 100644
--- a/drivers/media/video/hexium_orion.c
+++ b/drivers/media/video/hexium_orion.c
@@ -3,7 +3,7 @@
3 3
4 Visit http://www.mihu.de/linux/saa7146/ and follow the link 4 Visit http://www.mihu.de/linux/saa7146/ and follow the link
5 to "hexium" for further details about this card. 5 to "hexium" for further details about this card.
6 6
7 Copyright (C) 2003 Michael Hunold <michael@mihu.de> 7 Copyright (C) 2003 Michael Hunold <michael@mihu.de>
8 8
9 This program is free software; you can redistribute it and/or modify 9 This program is free software; you can redistribute it and/or modify
@@ -69,7 +69,7 @@ struct hexium
69{ 69{
70 int type; 70 int type;
71 struct video_device *video_dev; 71 struct video_device *video_dev;
72 struct i2c_adapter i2c_adapter; 72 struct i2c_adapter i2c_adapter;
73 73
74 int cur_input; /* current input */ 74 int cur_input; /* current input */
75}; 75};
@@ -86,7 +86,7 @@ static u8 hexium_saa7110[53]={
86}; 86};
87 87
88static struct { 88static struct {
89 struct hexium_data data[8]; 89 struct hexium_data data[8];
90} hexium_input_select[] = { 90} hexium_input_select[] = {
91{ 91{
92 { /* cvbs 1 */ 92 { /* cvbs 1 */
@@ -153,7 +153,7 @@ static struct {
153 { 0x30, 0x60 }, 153 { 0x30, 0x60 },
154 { 0x31, 0xB5 }, // ?? 154 { 0x31, 0xB5 }, // ??
155 { 0x21, 0x03 }, 155 { 0x21, 0x03 },
156 } 156 }
157}, { 157}, {
158 { /* y/c 1 */ 158 { /* y/c 1 */
159 { 0x06, 0x80 }, 159 { 0x06, 0x80 },
@@ -187,7 +187,7 @@ static struct {
187 { 0x31, 0x75 }, 187 { 0x31, 0x75 },
188 { 0x21, 0x21 }, 188 { 0x21, 0x21 },
189 } 189 }
190} 190}
191}; 191};
192 192
193static struct saa7146_standard hexium_standards[] = { 193static struct saa7146_standard hexium_standards[] = {
@@ -207,7 +207,7 @@ static struct saa7146_standard hexium_standards[] = {
207 .h_offset = 1, .h_pixels = 720, 207 .h_offset = 1, .h_pixels = 720,
208 .v_max_out = 576, .h_max_out = 768, 208 .v_max_out = 576, .h_max_out = 768,
209 } 209 }
210}; 210};
211 211
212/* this is only called for old HV-PCI6/Orion cards 212/* this is only called for old HV-PCI6/Orion cards
213 without eeprom */ 213 without eeprom */
@@ -272,7 +272,7 @@ static int hexium_probe(struct saa7146_dev *dev)
272 return 0; 272 return 0;
273 } 273 }
274 274
275 /* check if this is an old hexium Orion card by looking at 275 /* check if this is an old hexium Orion card by looking at
276 a saa7110 at address 0x4e */ 276 a saa7110 at address 0x4e */
277 if (0 == (err = i2c_smbus_xfer(&hexium->i2c_adapter, 0x4e, 0, I2C_SMBUS_READ, 0x00, I2C_SMBUS_BYTE_DATA, &data))) { 277 if (0 == (err = i2c_smbus_xfer(&hexium->i2c_adapter, 0x4e, 0, I2C_SMBUS_READ, 0x00, I2C_SMBUS_BYTE_DATA, &data))) {
278 printk("hexium_orion: device is a Hexium HV-PCI6/Orion (old).\n"); 278 printk("hexium_orion: device is a Hexium HV-PCI6/Orion (old).\n");
@@ -314,7 +314,7 @@ static int hexium_set_input(struct hexium *hexium, int input)
314{ 314{
315 union i2c_smbus_data data; 315 union i2c_smbus_data data;
316 int i = 0; 316 int i = 0;
317 317
318 DEB_D((".\n")); 318 DEB_D((".\n"));
319 319
320 for (i = 0; i < 8; i++) { 320 for (i = 0; i < 8; i++) {
@@ -375,7 +375,7 @@ static int hexium_ioctl(struct saa7146_fh *fh, unsigned int cmd, void *arg)
375 struct saa7146_dev *dev = fh->dev; 375 struct saa7146_dev *dev = fh->dev;
376 struct hexium *hexium = (struct hexium *) dev->ext_priv; 376 struct hexium *hexium = (struct hexium *) dev->ext_priv;
377/* 377/*
378 struct saa7146_vv *vv = dev->vv_data; 378 struct saa7146_vv *vv = dev->vv_data;
379*/ 379*/
380 switch (cmd) { 380 switch (cmd) {
381 case VIDIOC_ENUMINPUT: 381 case VIDIOC_ENUMINPUT:
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index 58b0e6982822..95bacf435414 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -44,51 +44,17 @@
44#include <media/ir-common.h> 44#include <media/ir-common.h>
45#include <media/ir-kbd-i2c.h> 45#include <media/ir-kbd-i2c.h>
46 46
47/* Mark Phalan <phalanm@o2.ie> */
48static IR_KEYTAB_TYPE ir_codes_pv951[IR_KEYTAB_SIZE] = {
49 [ 0 ] = KEY_KP0,
50 [ 1 ] = KEY_KP1,
51 [ 2 ] = KEY_KP2,
52 [ 3 ] = KEY_KP3,
53 [ 4 ] = KEY_KP4,
54 [ 5 ] = KEY_KP5,
55 [ 6 ] = KEY_KP6,
56 [ 7 ] = KEY_KP7,
57 [ 8 ] = KEY_KP8,
58 [ 9 ] = KEY_KP9,
59
60 [ 18 ] = KEY_POWER,
61 [ 16 ] = KEY_MUTE,
62 [ 31 ] = KEY_VOLUMEDOWN,
63 [ 27 ] = KEY_VOLUMEUP,
64 [ 26 ] = KEY_CHANNELUP,
65 [ 30 ] = KEY_CHANNELDOWN,
66 [ 14 ] = KEY_PAGEUP,
67 [ 29 ] = KEY_PAGEDOWN,
68 [ 19 ] = KEY_SOUND,
69
70 [ 24 ] = KEY_KPPLUSMINUS, /* CH +/- */
71 [ 22 ] = KEY_SUBTITLE, /* CC */
72 [ 13 ] = KEY_TEXT, /* TTX */
73 [ 11 ] = KEY_TV, /* AIR/CBL */
74 [ 17 ] = KEY_PC, /* PC/TV */
75 [ 23 ] = KEY_OK, /* CH RTN */
76 [ 25 ] = KEY_MODE, /* FUNC */
77 [ 12 ] = KEY_SEARCH, /* AUTOSCAN */
78
79 /* Not sure what to do with these ones! */
80 [ 15 ] = KEY_SELECT, /* SOURCE */
81 [ 10 ] = KEY_KPPLUS, /* +100 */
82 [ 20 ] = KEY_KPEQUAL, /* SYNC */
83 [ 28 ] = KEY_MEDIA, /* PC/TV */
84};
85
86/* ----------------------------------------------------------------------- */ 47/* ----------------------------------------------------------------------- */
87/* insmod parameters */ 48/* insmod parameters */
88 49
89static int debug; 50static int debug;
90module_param(debug, int, 0644); /* debug level (0,1,2) */ 51module_param(debug, int, 0644); /* debug level (0,1,2) */
91 52
53static int hauppauge = 0;
54module_param(hauppauge, int, 0644); /* Choose Hauppauge remote */
55MODULE_PARM_DESC(hauppauge, "Specify Hauppauge remote: 0=black, 1=grey (defaults to 0)");
56
57
92#define DEVNAME "ir-kbd-i2c" 58#define DEVNAME "ir-kbd-i2c"
93#define dprintk(level, fmt, arg...) if (debug >= level) \ 59#define dprintk(level, fmt, arg...) if (debug >= level) \
94 printk(KERN_DEBUG DEVNAME ": " fmt , ## arg) 60 printk(KERN_DEBUG DEVNAME ": " fmt , ## arg)
@@ -336,7 +302,11 @@ static int ir_attach(struct i2c_adapter *adap, int addr,
336 name = "Hauppauge"; 302 name = "Hauppauge";
337 ir->get_key = get_key_haup; 303 ir->get_key = get_key_haup;
338 ir_type = IR_TYPE_RC5; 304 ir_type = IR_TYPE_RC5;
339 ir_codes = ir_codes_rc5_tv; 305 if (hauppauge == 1) {
306 ir_codes = ir_codes_hauppauge_new;
307 } else {
308 ir_codes = ir_codes_rc5_tv;
309 }
340 break; 310 break;
341 case 0x30: 311 case 0x30:
342 name = "KNC One"; 312 name = "KNC One";
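The ir-kbd-i2c change replaces the hard-wired RC5 table for Hauppauge boards with a load-time choice between the black and grey remotes, and drops the local PV951 keymap from this file. The new option follows the usual module-parameter pattern; a minimal, hypothetical sketch (names are illustrative):

	#include <linux/module.h>
	#include <linux/moduleparam.h>

	static int grey_remote;			/* 0 = default map, 1 = alternate map */
	module_param(grey_remote, int, 0644);	/* readable/writable via sysfs */
	MODULE_PARM_DESC(grey_remote, "Select the alternate remote keymap");

	/* at attach time:
	 *	keymap = grey_remote ? alternate_keymap : default_keymap;
	 */

With the real parameter name the choice is made at load time, e.g. "modprobe ir-kbd-i2c hauppauge=1" for the grey remote.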
diff --git a/drivers/media/video/meye.c b/drivers/media/video/meye.c
index 2869464aee0d..850bee97090c 100644
--- a/drivers/media/video/meye.c
+++ b/drivers/media/video/meye.c
@@ -925,7 +925,7 @@ static int meye_do_ioctl(struct inode *inode, struct file *file,
925 return -EINVAL; 925 return -EINVAL;
926 if (p->palette != VIDEO_PALETTE_YUV422) 926 if (p->palette != VIDEO_PALETTE_YUV422)
927 return -EINVAL; 927 return -EINVAL;
928 down(&meye.lock); 928 mutex_lock(&meye.lock);
929 sonypi_camera_command(SONYPI_COMMAND_SETCAMERABRIGHTNESS, 929 sonypi_camera_command(SONYPI_COMMAND_SETCAMERABRIGHTNESS,
930 p->brightness >> 10); 930 p->brightness >> 10);
931 sonypi_camera_command(SONYPI_COMMAND_SETCAMERAHUE, 931 sonypi_camera_command(SONYPI_COMMAND_SETCAMERAHUE,
@@ -935,7 +935,7 @@ static int meye_do_ioctl(struct inode *inode, struct file *file,
935 sonypi_camera_command(SONYPI_COMMAND_SETCAMERACONTRAST, 935 sonypi_camera_command(SONYPI_COMMAND_SETCAMERACONTRAST,
936 p->contrast >> 10); 936 p->contrast >> 10);
937 meye.picture = *p; 937 meye.picture = *p;
938 up(&meye.lock); 938 mutex_unlock(&meye.lock);
939 break; 939 break;
940 } 940 }
941 941
@@ -946,21 +946,21 @@ static int meye_do_ioctl(struct inode *inode, struct file *file,
946 if (*i < 0 || *i >= gbuffers) 946 if (*i < 0 || *i >= gbuffers)
947 return -EINVAL; 947 return -EINVAL;
948 948
949 down(&meye.lock); 949 mutex_lock(&meye.lock);
950 950
951 switch (meye.grab_buffer[*i].state) { 951 switch (meye.grab_buffer[*i].state) {
952 952
953 case MEYE_BUF_UNUSED: 953 case MEYE_BUF_UNUSED:
954 up(&meye.lock); 954 mutex_unlock(&meye.lock);
955 return -EINVAL; 955 return -EINVAL;
956 case MEYE_BUF_USING: 956 case MEYE_BUF_USING:
957 if (file->f_flags & O_NONBLOCK) { 957 if (file->f_flags & O_NONBLOCK) {
958 up(&meye.lock); 958 mutex_unlock(&meye.lock);
959 return -EAGAIN; 959 return -EAGAIN;
960 } 960 }
961 if (wait_event_interruptible(meye.proc_list, 961 if (wait_event_interruptible(meye.proc_list,
962 (meye.grab_buffer[*i].state != MEYE_BUF_USING))) { 962 (meye.grab_buffer[*i].state != MEYE_BUF_USING))) {
963 up(&meye.lock); 963 mutex_unlock(&meye.lock);
964 return -EINTR; 964 return -EINTR;
965 } 965 }
966 /* fall through */ 966 /* fall through */
@@ -968,7 +968,7 @@ static int meye_do_ioctl(struct inode *inode, struct file *file,
968 meye.grab_buffer[*i].state = MEYE_BUF_UNUSED; 968 meye.grab_buffer[*i].state = MEYE_BUF_UNUSED;
969 kfifo_get(meye.doneq, (unsigned char *)&unused, sizeof(int)); 969 kfifo_get(meye.doneq, (unsigned char *)&unused, sizeof(int));
970 } 970 }
971 up(&meye.lock); 971 mutex_unlock(&meye.lock);
972 break; 972 break;
973 } 973 }
974 974
@@ -987,7 +987,7 @@ static int meye_do_ioctl(struct inode *inode, struct file *file,
987 if (meye.grab_buffer[vm->frame].state != MEYE_BUF_UNUSED) 987 if (meye.grab_buffer[vm->frame].state != MEYE_BUF_UNUSED)
988 return -EBUSY; 988 return -EBUSY;
989 989
990 down(&meye.lock); 990 mutex_lock(&meye.lock);
991 if (vm->width == 640 && vm->height == 480) { 991 if (vm->width == 640 && vm->height == 480) {
992 if (meye.params.subsample) { 992 if (meye.params.subsample) {
993 meye.params.subsample = 0; 993 meye.params.subsample = 0;
@@ -999,7 +999,7 @@ static int meye_do_ioctl(struct inode *inode, struct file *file,
999 restart = 1; 999 restart = 1;
1000 } 1000 }
1001 } else { 1001 } else {
1002 up(&meye.lock); 1002 mutex_unlock(&meye.lock);
1003 return -EINVAL; 1003 return -EINVAL;
1004 } 1004 }
1005 1005
@@ -1007,7 +1007,7 @@ static int meye_do_ioctl(struct inode *inode, struct file *file,
1007 mchip_continuous_start(); 1007 mchip_continuous_start();
1008 meye.grab_buffer[vm->frame].state = MEYE_BUF_USING; 1008 meye.grab_buffer[vm->frame].state = MEYE_BUF_USING;
1009 kfifo_put(meye.grabq, (unsigned char *)&vm->frame, sizeof(int)); 1009 kfifo_put(meye.grabq, (unsigned char *)&vm->frame, sizeof(int));
1010 up(&meye.lock); 1010 mutex_unlock(&meye.lock);
1011 break; 1011 break;
1012 } 1012 }
1013 1013
@@ -1039,7 +1039,7 @@ static int meye_do_ioctl(struct inode *inode, struct file *file,
1039 return -EINVAL; 1039 return -EINVAL;
1040 if (jp->framerate > 31) 1040 if (jp->framerate > 31)
1041 return -EINVAL; 1041 return -EINVAL;
1042 down(&meye.lock); 1042 mutex_lock(&meye.lock);
1043 if (meye.params.subsample != jp->subsample || 1043 if (meye.params.subsample != jp->subsample ||
1044 meye.params.quality != jp->quality) 1044 meye.params.quality != jp->quality)
1045 mchip_hic_stop(); /* need restart */ 1045 mchip_hic_stop(); /* need restart */
@@ -1050,7 +1050,7 @@ static int meye_do_ioctl(struct inode *inode, struct file *file,
1050 meye.params.agc); 1050 meye.params.agc);
1051 sonypi_camera_command(SONYPI_COMMAND_SETCAMERAPICTURE, 1051 sonypi_camera_command(SONYPI_COMMAND_SETCAMERAPICTURE,
1052 meye.params.picture); 1052 meye.params.picture);
1053 up(&meye.lock); 1053 mutex_unlock(&meye.lock);
1054 break; 1054 break;
1055 } 1055 }
1056 1056
@@ -1068,12 +1068,12 @@ static int meye_do_ioctl(struct inode *inode, struct file *file,
1068 } 1068 }
1069 if (meye.grab_buffer[*nb].state != MEYE_BUF_UNUSED) 1069 if (meye.grab_buffer[*nb].state != MEYE_BUF_UNUSED)
1070 return -EBUSY; 1070 return -EBUSY;
1071 down(&meye.lock); 1071 mutex_lock(&meye.lock);
1072 if (meye.mchip_mode != MCHIP_HIC_MODE_CONT_COMP) 1072 if (meye.mchip_mode != MCHIP_HIC_MODE_CONT_COMP)
1073 mchip_cont_compression_start(); 1073 mchip_cont_compression_start();
1074 meye.grab_buffer[*nb].state = MEYE_BUF_USING; 1074 meye.grab_buffer[*nb].state = MEYE_BUF_USING;
1075 kfifo_put(meye.grabq, (unsigned char *)nb, sizeof(int)); 1075 kfifo_put(meye.grabq, (unsigned char *)nb, sizeof(int));
1076 up(&meye.lock); 1076 mutex_unlock(&meye.lock);
1077 break; 1077 break;
1078 } 1078 }
1079 1079
@@ -1084,20 +1084,20 @@ static int meye_do_ioctl(struct inode *inode, struct file *file,
1084 if (*i < 0 || *i >= gbuffers) 1084 if (*i < 0 || *i >= gbuffers)
1085 return -EINVAL; 1085 return -EINVAL;
1086 1086
1087 down(&meye.lock); 1087 mutex_lock(&meye.lock);
1088 switch (meye.grab_buffer[*i].state) { 1088 switch (meye.grab_buffer[*i].state) {
1089 1089
1090 case MEYE_BUF_UNUSED: 1090 case MEYE_BUF_UNUSED:
1091 up(&meye.lock); 1091 mutex_unlock(&meye.lock);
1092 return -EINVAL; 1092 return -EINVAL;
1093 case MEYE_BUF_USING: 1093 case MEYE_BUF_USING:
1094 if (file->f_flags & O_NONBLOCK) { 1094 if (file->f_flags & O_NONBLOCK) {
1095 up(&meye.lock); 1095 mutex_unlock(&meye.lock);
1096 return -EAGAIN; 1096 return -EAGAIN;
1097 } 1097 }
1098 if (wait_event_interruptible(meye.proc_list, 1098 if (wait_event_interruptible(meye.proc_list,
1099 (meye.grab_buffer[*i].state != MEYE_BUF_USING))) { 1099 (meye.grab_buffer[*i].state != MEYE_BUF_USING))) {
1100 up(&meye.lock); 1100 mutex_unlock(&meye.lock);
1101 return -EINTR; 1101 return -EINTR;
1102 } 1102 }
1103 /* fall through */ 1103 /* fall through */
@@ -1106,7 +1106,7 @@ static int meye_do_ioctl(struct inode *inode, struct file *file,
1106 kfifo_get(meye.doneq, (unsigned char *)&unused, sizeof(int)); 1106 kfifo_get(meye.doneq, (unsigned char *)&unused, sizeof(int));
1107 } 1107 }
1108 *i = meye.grab_buffer[*i].size; 1108 *i = meye.grab_buffer[*i].size;
1109 up(&meye.lock); 1109 mutex_unlock(&meye.lock);
1110 break; 1110 break;
1111 } 1111 }
1112 1112
@@ -1116,14 +1116,14 @@ static int meye_do_ioctl(struct inode *inode, struct file *file,
1116 return -EINVAL; 1116 return -EINVAL;
1117 if (meye.grab_buffer[0].state != MEYE_BUF_UNUSED) 1117 if (meye.grab_buffer[0].state != MEYE_BUF_UNUSED)
1118 return -EBUSY; 1118 return -EBUSY;
1119 down(&meye.lock); 1119 mutex_lock(&meye.lock);
1120 meye.grab_buffer[0].state = MEYE_BUF_USING; 1120 meye.grab_buffer[0].state = MEYE_BUF_USING;
1121 mchip_take_picture(); 1121 mchip_take_picture();
1122 mchip_get_picture( 1122 mchip_get_picture(
1123 meye.grab_fbuffer, 1123 meye.grab_fbuffer,
1124 mchip_hsize() * mchip_vsize() * 2); 1124 mchip_hsize() * mchip_vsize() * 2);
1125 meye.grab_buffer[0].state = MEYE_BUF_DONE; 1125 meye.grab_buffer[0].state = MEYE_BUF_DONE;
1126 up(&meye.lock); 1126 mutex_unlock(&meye.lock);
1127 break; 1127 break;
1128 } 1128 }
1129 1129
@@ -1134,7 +1134,7 @@ static int meye_do_ioctl(struct inode *inode, struct file *file,
1134 return -EINVAL; 1134 return -EINVAL;
1135 if (meye.grab_buffer[0].state != MEYE_BUF_UNUSED) 1135 if (meye.grab_buffer[0].state != MEYE_BUF_UNUSED)
1136 return -EBUSY; 1136 return -EBUSY;
1137 down(&meye.lock); 1137 mutex_lock(&meye.lock);
1138 meye.grab_buffer[0].state = MEYE_BUF_USING; 1138 meye.grab_buffer[0].state = MEYE_BUF_USING;
1139 *len = -1; 1139 *len = -1;
1140 while (*len == -1) { 1140 while (*len == -1) {
@@ -1142,7 +1142,7 @@ static int meye_do_ioctl(struct inode *inode, struct file *file,
1142 *len = mchip_compress_frame(meye.grab_fbuffer, gbufsize); 1142 *len = mchip_compress_frame(meye.grab_fbuffer, gbufsize);
1143 } 1143 }
1144 meye.grab_buffer[0].state = MEYE_BUF_DONE; 1144 meye.grab_buffer[0].state = MEYE_BUF_DONE;
1145 up(&meye.lock); 1145 mutex_unlock(&meye.lock);
1146 break; 1146 break;
1147 } 1147 }
1148 1148
@@ -1285,7 +1285,7 @@ static int meye_do_ioctl(struct inode *inode, struct file *file,
1285 case VIDIOC_S_CTRL: { 1285 case VIDIOC_S_CTRL: {
1286 struct v4l2_control *c = arg; 1286 struct v4l2_control *c = arg;
1287 1287
1288 down(&meye.lock); 1288 mutex_lock(&meye.lock);
1289 switch (c->id) { 1289 switch (c->id) {
1290 case V4L2_CID_BRIGHTNESS: 1290 case V4L2_CID_BRIGHTNESS:
1291 sonypi_camera_command( 1291 sonypi_camera_command(
@@ -1329,17 +1329,17 @@ static int meye_do_ioctl(struct inode *inode, struct file *file,
1329 meye.params.framerate = c->value; 1329 meye.params.framerate = c->value;
1330 break; 1330 break;
1331 default: 1331 default:
1332 up(&meye.lock); 1332 mutex_unlock(&meye.lock);
1333 return -EINVAL; 1333 return -EINVAL;
1334 } 1334 }
1335 up(&meye.lock); 1335 mutex_unlock(&meye.lock);
1336 break; 1336 break;
1337 } 1337 }
1338 1338
1339 case VIDIOC_G_CTRL: { 1339 case VIDIOC_G_CTRL: {
1340 struct v4l2_control *c = arg; 1340 struct v4l2_control *c = arg;
1341 1341
1342 down(&meye.lock); 1342 mutex_lock(&meye.lock);
1343 switch (c->id) { 1343 switch (c->id) {
1344 case V4L2_CID_BRIGHTNESS: 1344 case V4L2_CID_BRIGHTNESS:
1345 c->value = meye.picture.brightness >> 10; 1345 c->value = meye.picture.brightness >> 10;
@@ -1369,10 +1369,10 @@ static int meye_do_ioctl(struct inode *inode, struct file *file,
1369 c->value = meye.params.framerate; 1369 c->value = meye.params.framerate;
1370 break; 1370 break;
1371 default: 1371 default:
1372 up(&meye.lock); 1372 mutex_unlock(&meye.lock);
1373 return -EINVAL; 1373 return -EINVAL;
1374 } 1374 }
1375 up(&meye.lock); 1375 mutex_unlock(&meye.lock);
1376 break; 1376 break;
1377 } 1377 }
1378 1378
@@ -1469,7 +1469,7 @@ static int meye_do_ioctl(struct inode *inode, struct file *file,
1469 f->fmt.pix.field != V4L2_FIELD_NONE) 1469 f->fmt.pix.field != V4L2_FIELD_NONE)
1470 return -EINVAL; 1470 return -EINVAL;
1471 f->fmt.pix.field = V4L2_FIELD_NONE; 1471 f->fmt.pix.field = V4L2_FIELD_NONE;
1472 down(&meye.lock); 1472 mutex_lock(&meye.lock);
1473 if (f->fmt.pix.width <= 320) { 1473 if (f->fmt.pix.width <= 320) {
1474 f->fmt.pix.width = 320; 1474 f->fmt.pix.width = 320;
1475 f->fmt.pix.height = 240; 1475 f->fmt.pix.height = 240;
@@ -1487,7 +1487,7 @@ static int meye_do_ioctl(struct inode *inode, struct file *file,
1487 meye.mchip_mode = MCHIP_HIC_MODE_CONT_COMP; 1487 meye.mchip_mode = MCHIP_HIC_MODE_CONT_COMP;
1488 break; 1488 break;
1489 } 1489 }
1490 up(&meye.lock); 1490 mutex_unlock(&meye.lock);
1491 f->fmt.pix.bytesperline = f->fmt.pix.width * 2; 1491 f->fmt.pix.bytesperline = f->fmt.pix.width * 2;
1492 f->fmt.pix.sizeimage = f->fmt.pix.height * 1492 f->fmt.pix.sizeimage = f->fmt.pix.height *
1493 f->fmt.pix.bytesperline; 1493 f->fmt.pix.bytesperline;
@@ -1509,11 +1509,11 @@ static int meye_do_ioctl(struct inode *inode, struct file *file,
1509 /* already allocated, no modifications */ 1509 /* already allocated, no modifications */
1510 break; 1510 break;
1511 } 1511 }
1512 down(&meye.lock); 1512 mutex_lock(&meye.lock);
1513 if (meye.grab_fbuffer) { 1513 if (meye.grab_fbuffer) {
1514 for (i = 0; i < gbuffers; i++) 1514 for (i = 0; i < gbuffers; i++)
1515 if (meye.vma_use_count[i]) { 1515 if (meye.vma_use_count[i]) {
1516 up(&meye.lock); 1516 mutex_unlock(&meye.lock);
1517 return -EINVAL; 1517 return -EINVAL;
1518 } 1518 }
1519 rvfree(meye.grab_fbuffer, gbuffers * gbufsize); 1519 rvfree(meye.grab_fbuffer, gbuffers * gbufsize);
@@ -1525,12 +1525,12 @@ static int meye_do_ioctl(struct inode *inode, struct file *file,
1525 if (!meye.grab_fbuffer) { 1525 if (!meye.grab_fbuffer) {
1526 printk(KERN_ERR "meye: v4l framebuffer allocation" 1526 printk(KERN_ERR "meye: v4l framebuffer allocation"
1527 " failed\n"); 1527 " failed\n");
1528 up(&meye.lock); 1528 mutex_unlock(&meye.lock);
1529 return -ENOMEM; 1529 return -ENOMEM;
1530 } 1530 }
1531 for (i = 0; i < gbuffers; i++) 1531 for (i = 0; i < gbuffers; i++)
1532 meye.vma_use_count[i] = 0; 1532 meye.vma_use_count[i] = 0;
1533 up(&meye.lock); 1533 mutex_unlock(&meye.lock);
1534 break; 1534 break;
1535 } 1535 }
1536 1536
@@ -1569,12 +1569,12 @@ static int meye_do_ioctl(struct inode *inode, struct file *file,
1569 return -EINVAL; 1569 return -EINVAL;
1570 if (meye.grab_buffer[buf->index].state != MEYE_BUF_UNUSED) 1570 if (meye.grab_buffer[buf->index].state != MEYE_BUF_UNUSED)
1571 return -EINVAL; 1571 return -EINVAL;
1572 down(&meye.lock); 1572 mutex_lock(&meye.lock);
1573 buf->flags |= V4L2_BUF_FLAG_QUEUED; 1573 buf->flags |= V4L2_BUF_FLAG_QUEUED;
1574 buf->flags &= ~V4L2_BUF_FLAG_DONE; 1574 buf->flags &= ~V4L2_BUF_FLAG_DONE;
1575 meye.grab_buffer[buf->index].state = MEYE_BUF_USING; 1575 meye.grab_buffer[buf->index].state = MEYE_BUF_USING;
1576 kfifo_put(meye.grabq, (unsigned char *)&buf->index, sizeof(int)); 1576 kfifo_put(meye.grabq, (unsigned char *)&buf->index, sizeof(int));
1577 up(&meye.lock); 1577 mutex_unlock(&meye.lock);
1578 break; 1578 break;
1579 } 1579 }
1580 1580
@@ -1587,23 +1587,23 @@ static int meye_do_ioctl(struct inode *inode, struct file *file,
1587 if (buf->memory != V4L2_MEMORY_MMAP) 1587 if (buf->memory != V4L2_MEMORY_MMAP)
1588 return -EINVAL; 1588 return -EINVAL;
1589 1589
1590 down(&meye.lock); 1590 mutex_lock(&meye.lock);
1591 if (kfifo_len(meye.doneq) == 0 && file->f_flags & O_NONBLOCK) { 1591 if (kfifo_len(meye.doneq) == 0 && file->f_flags & O_NONBLOCK) {
1592 up(&meye.lock); 1592 mutex_unlock(&meye.lock);
1593 return -EAGAIN; 1593 return -EAGAIN;
1594 } 1594 }
1595 if (wait_event_interruptible(meye.proc_list, 1595 if (wait_event_interruptible(meye.proc_list,
1596 kfifo_len(meye.doneq) != 0) < 0) { 1596 kfifo_len(meye.doneq) != 0) < 0) {
1597 up(&meye.lock); 1597 mutex_unlock(&meye.lock);
1598 return -EINTR; 1598 return -EINTR;
1599 } 1599 }
1600 if (!kfifo_get(meye.doneq, (unsigned char *)&reqnr, 1600 if (!kfifo_get(meye.doneq, (unsigned char *)&reqnr,
1601 sizeof(int))) { 1601 sizeof(int))) {
1602 up(&meye.lock); 1602 mutex_unlock(&meye.lock);
1603 return -EBUSY; 1603 return -EBUSY;
1604 } 1604 }
1605 if (meye.grab_buffer[reqnr].state != MEYE_BUF_DONE) { 1605 if (meye.grab_buffer[reqnr].state != MEYE_BUF_DONE) {
1606 up(&meye.lock); 1606 mutex_unlock(&meye.lock);
1607 return -EINVAL; 1607 return -EINVAL;
1608 } 1608 }
1609 buf->index = reqnr; 1609 buf->index = reqnr;
@@ -1616,12 +1616,12 @@ static int meye_do_ioctl(struct inode *inode, struct file *file,
1616 buf->m.offset = reqnr * gbufsize; 1616 buf->m.offset = reqnr * gbufsize;
1617 buf->length = gbufsize; 1617 buf->length = gbufsize;
1618 meye.grab_buffer[reqnr].state = MEYE_BUF_UNUSED; 1618 meye.grab_buffer[reqnr].state = MEYE_BUF_UNUSED;
1619 up(&meye.lock); 1619 mutex_unlock(&meye.lock);
1620 break; 1620 break;
1621 } 1621 }
1622 1622
1623 case VIDIOC_STREAMON: { 1623 case VIDIOC_STREAMON: {
1624 down(&meye.lock); 1624 mutex_lock(&meye.lock);
1625 switch (meye.mchip_mode) { 1625 switch (meye.mchip_mode) {
1626 case MCHIP_HIC_MODE_CONT_OUT: 1626 case MCHIP_HIC_MODE_CONT_OUT:
1627 mchip_continuous_start(); 1627 mchip_continuous_start();
@@ -1630,23 +1630,23 @@ static int meye_do_ioctl(struct inode *inode, struct file *file,
1630 mchip_cont_compression_start(); 1630 mchip_cont_compression_start();
1631 break; 1631 break;
1632 default: 1632 default:
1633 up(&meye.lock); 1633 mutex_unlock(&meye.lock);
1634 return -EINVAL; 1634 return -EINVAL;
1635 } 1635 }
1636 up(&meye.lock); 1636 mutex_unlock(&meye.lock);
1637 break; 1637 break;
1638 } 1638 }
1639 1639
1640 case VIDIOC_STREAMOFF: { 1640 case VIDIOC_STREAMOFF: {
1641 int i; 1641 int i;
1642 1642
1643 down(&meye.lock); 1643 mutex_lock(&meye.lock);
1644 mchip_hic_stop(); 1644 mchip_hic_stop();
1645 kfifo_reset(meye.grabq); 1645 kfifo_reset(meye.grabq);
1646 kfifo_reset(meye.doneq); 1646 kfifo_reset(meye.doneq);
1647 for (i = 0; i < MEYE_MAX_BUFNBRS; i++) 1647 for (i = 0; i < MEYE_MAX_BUFNBRS; i++)
1648 meye.grab_buffer[i].state = MEYE_BUF_UNUSED; 1648 meye.grab_buffer[i].state = MEYE_BUF_UNUSED;
1649 up(&meye.lock); 1649 mutex_unlock(&meye.lock);
1650 break; 1650 break;
1651 } 1651 }
1652 1652
@@ -1672,11 +1672,11 @@ static unsigned int meye_poll(struct file *file, poll_table *wait)
1672{ 1672{
1673 unsigned int res = 0; 1673 unsigned int res = 0;
1674 1674
1675 down(&meye.lock); 1675 mutex_lock(&meye.lock);
1676 poll_wait(file, &meye.proc_list, wait); 1676 poll_wait(file, &meye.proc_list, wait);
1677 if (kfifo_len(meye.doneq)) 1677 if (kfifo_len(meye.doneq))
1678 res = POLLIN | POLLRDNORM; 1678 res = POLLIN | POLLRDNORM;
1679 up(&meye.lock); 1679 mutex_unlock(&meye.lock);
1680 return res; 1680 return res;
1681} 1681}
1682 1682
@@ -1704,9 +1704,9 @@ static int meye_mmap(struct file *file, struct vm_area_struct *vma)
1704 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; 1704 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
1705 unsigned long page, pos; 1705 unsigned long page, pos;
1706 1706
1707 down(&meye.lock); 1707 mutex_lock(&meye.lock);
1708 if (size > gbuffers * gbufsize) { 1708 if (size > gbuffers * gbufsize) {
1709 up(&meye.lock); 1709 mutex_unlock(&meye.lock);
1710 return -EINVAL; 1710 return -EINVAL;
1711 } 1711 }
1712 if (!meye.grab_fbuffer) { 1712 if (!meye.grab_fbuffer) {
@@ -1716,7 +1716,7 @@ static int meye_mmap(struct file *file, struct vm_area_struct *vma)
1716 meye.grab_fbuffer = rvmalloc(gbuffers*gbufsize); 1716 meye.grab_fbuffer = rvmalloc(gbuffers*gbufsize);
1717 if (!meye.grab_fbuffer) { 1717 if (!meye.grab_fbuffer) {
1718 printk(KERN_ERR "meye: v4l framebuffer allocation failed\n"); 1718 printk(KERN_ERR "meye: v4l framebuffer allocation failed\n");
1719 up(&meye.lock); 1719 mutex_unlock(&meye.lock);
1720 return -ENOMEM; 1720 return -ENOMEM;
1721 } 1721 }
1722 for (i = 0; i < gbuffers; i++) 1722 for (i = 0; i < gbuffers; i++)
@@ -1727,7 +1727,7 @@ static int meye_mmap(struct file *file, struct vm_area_struct *vma)
1727 while (size > 0) { 1727 while (size > 0) {
1728 page = vmalloc_to_pfn((void *)pos); 1728 page = vmalloc_to_pfn((void *)pos);
1729 if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED)) { 1729 if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED)) {
1730 up(&meye.lock); 1730 mutex_unlock(&meye.lock);
1731 return -EAGAIN; 1731 return -EAGAIN;
1732 } 1732 }
1733 start += PAGE_SIZE; 1733 start += PAGE_SIZE;
@@ -1744,7 +1744,7 @@ static int meye_mmap(struct file *file, struct vm_area_struct *vma)
1744 vma->vm_private_data = (void *) (offset / gbufsize); 1744 vma->vm_private_data = (void *) (offset / gbufsize);
1745 meye_vm_open(vma); 1745 meye_vm_open(vma);
1746 1746
1747 up(&meye.lock); 1747 mutex_unlock(&meye.lock);
1748 return 0; 1748 return 0;
1749} 1749}
1750 1750
@@ -1913,7 +1913,7 @@ static int __devinit meye_probe(struct pci_dev *pcidev,
1913 goto outvideoreg; 1913 goto outvideoreg;
1914 } 1914 }
1915 1915
1916 init_MUTEX(&meye.lock); 1916 mutex_init(&meye.lock);
1917 init_waitqueue_head(&meye.proc_list); 1917 init_waitqueue_head(&meye.proc_list);
1918 meye.picture.depth = 16; 1918 meye.picture.depth = 16;
1919 meye.picture.palette = VIDEO_PALETTE_YUV422; 1919 meye.picture.palette = VIDEO_PALETTE_YUV422;
diff --git a/drivers/media/video/meye.h b/drivers/media/video/meye.h
index e8cd897b0d20..0d09a0e3803c 100644
--- a/drivers/media/video/meye.h
+++ b/drivers/media/video/meye.h
@@ -260,6 +260,8 @@
260 260
261/* private API definitions */ 261/* private API definitions */
262#include <linux/meye.h> 262#include <linux/meye.h>
263#include <linux/mutex.h>
264
263 265
264/* Enable jpg software correction */ 266/* Enable jpg software correction */
265#define MEYE_JPEG_CORRECTION 1 267#define MEYE_JPEG_CORRECTION 1
@@ -301,7 +303,7 @@ struct meye {
301 /* list of buffers */ 303 /* list of buffers */
302 struct meye_grab_buffer grab_buffer[MEYE_MAX_BUFNBRS]; 304 struct meye_grab_buffer grab_buffer[MEYE_MAX_BUFNBRS];
303 int vma_use_count[MEYE_MAX_BUFNBRS]; /* mmap count */ 305 int vma_use_count[MEYE_MAX_BUFNBRS]; /* mmap count */
304 struct semaphore lock; /* semaphore for open/mmap... */ 306 struct mutex lock; /* mutex for open/mmap... */
305 struct kfifo *grabq; /* queue for buffers to be grabbed */ 307 struct kfifo *grabq; /* queue for buffers to be grabbed */
306 spinlock_t grabq_lock; /* lock protecting the queue */ 308 spinlock_t grabq_lock; /* lock protecting the queue */
307 struct kfifo *doneq; /* queue for grabbed buffers */ 309 struct kfifo *doneq; /* queue for grabbed buffers */
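The meye hunks are a mechanical conversion of the driver's open/mmap/ioctl lock from a semaphore used as a mutex to a real struct mutex: down()/up() become mutex_lock()/mutex_unlock(), init_MUTEX() becomes mutex_init(), and the field type in struct meye changes accordingly. A minimal sketch of the two idioms side by side (illustrative only, not meye code):

	#include <linux/mutex.h>

	static DEFINE_MUTEX(demo_lock);	/* static case; fields embedded in a struct use mutex_init() */

	static void demo_critical_section(void)
	{
		mutex_lock(&demo_lock);		/* was: down(&sem) */
		/* ... touch shared state ... */
		mutex_unlock(&demo_lock);	/* was: up(&sem)   */
	}

Unlike a counting semaphore, a mutex must be released by the task that acquired it, which is how these code paths already used the lock.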
diff --git a/drivers/media/video/msp3400-driver.c b/drivers/media/video/msp3400-driver.c
index 69ed369c2f48..11ea9765769c 100644
--- a/drivers/media/video/msp3400-driver.c
+++ b/drivers/media/video/msp3400-driver.c
@@ -411,9 +411,9 @@ static int msp_mode_v4l2_to_v4l1(int rxsubchans)
411 if (rxsubchans & V4L2_TUNER_SUB_STEREO) 411 if (rxsubchans & V4L2_TUNER_SUB_STEREO)
412 mode |= VIDEO_SOUND_STEREO; 412 mode |= VIDEO_SOUND_STEREO;
413 if (rxsubchans & V4L2_TUNER_SUB_LANG2) 413 if (rxsubchans & V4L2_TUNER_SUB_LANG2)
414 mode |= VIDEO_SOUND_LANG2; 414 mode |= VIDEO_SOUND_LANG2 | VIDEO_SOUND_STEREO;
415 if (rxsubchans & V4L2_TUNER_SUB_LANG1) 415 if (rxsubchans & V4L2_TUNER_SUB_LANG1)
416 mode |= VIDEO_SOUND_LANG1; 416 mode |= VIDEO_SOUND_LANG1 | VIDEO_SOUND_STEREO;
417 if (mode == 0) 417 if (mode == 0)
418 mode |= VIDEO_SOUND_MONO; 418 mode |= VIDEO_SOUND_MONO;
419 return mode; 419 return mode;
@@ -430,21 +430,6 @@ static int msp_mode_v4l1_to_v4l2(int mode)
430 return V4L2_TUNER_MODE_MONO; 430 return V4L2_TUNER_MODE_MONO;
431} 431}
432 432
433static void msp_any_detect_stereo(struct i2c_client *client)
434{
435 struct msp_state *state = i2c_get_clientdata(client);
436
437 switch (state->opmode) {
438 case OPMODE_MANUAL:
439 case OPMODE_AUTODETECT:
440 autodetect_stereo(client);
441 break;
442 case OPMODE_AUTOSELECT:
443 msp34xxg_detect_stereo(client);
444 break;
445 }
446}
447
448static struct v4l2_queryctrl msp_qctrl_std[] = { 433static struct v4l2_queryctrl msp_qctrl_std[] = {
449 { 434 {
450 .id = V4L2_CID_AUDIO_VOLUME, 435 .id = V4L2_CID_AUDIO_VOLUME,
@@ -506,22 +491,6 @@ static struct v4l2_queryctrl msp_qctrl_sound_processing[] = {
506}; 491};
507 492
508 493
509static void msp_any_set_audmode(struct i2c_client *client, int audmode)
510{
511 struct msp_state *state = i2c_get_clientdata(client);
512
513 switch (state->opmode) {
514 case OPMODE_MANUAL:
515 case OPMODE_AUTODETECT:
516 state->watch_stereo = 0;
517 msp3400c_setstereo(client, audmode);
518 break;
519 case OPMODE_AUTOSELECT:
520 msp34xxg_set_audmode(client, audmode);
521 break;
522 }
523}
524
525static int msp_get_ctrl(struct i2c_client *client, struct v4l2_control *ctrl) 494static int msp_get_ctrl(struct i2c_client *client, struct v4l2_control *ctrl)
526{ 495{
527 struct msp_state *state = i2c_get_clientdata(client); 496 struct msp_state *state = i2c_get_clientdata(client);
@@ -653,11 +622,10 @@ static int msp_command(struct i2c_client *client, unsigned int cmd, void *arg)
653 } 622 }
654 if (scart) { 623 if (scart) {
655 state->rxsubchans = V4L2_TUNER_SUB_STEREO; 624 state->rxsubchans = V4L2_TUNER_SUB_STEREO;
656 state->audmode = V4L2_TUNER_MODE_STEREO;
657 msp_set_scart(client, scart, 0); 625 msp_set_scart(client, scart, 0);
658 msp_write_dsp(client, 0x000d, 0x1900); 626 msp_write_dsp(client, 0x000d, 0x1900);
659 if (state->opmode != OPMODE_AUTOSELECT) 627 if (state->opmode != OPMODE_AUTOSELECT)
660 msp3400c_setstereo(client, state->audmode); 628 msp_set_audmode(client);
661 } 629 }
662 msp_wake_thread(client); 630 msp_wake_thread(client);
663 break; 631 break;
@@ -671,8 +639,8 @@ static int msp_command(struct i2c_client *client, unsigned int cmd, void *arg)
671 switch (state->opmode) { 639 switch (state->opmode) {
672 case OPMODE_MANUAL: 640 case OPMODE_MANUAL:
673 /* set msp3400 to FM radio mode */ 641 /* set msp3400 to FM radio mode */
674 msp3400c_setmode(client, MSP_MODE_FM_RADIO); 642 msp3400c_set_mode(client, MSP_MODE_FM_RADIO);
675 msp3400c_setcarrier(client, MSP_CARRIER(10.7), 643 msp3400c_set_carrier(client, MSP_CARRIER(10.7),
676 MSP_CARRIER(10.7)); 644 MSP_CARRIER(10.7));
677 msp_set_audio(client); 645 msp_set_audio(client);
678 break; 646 break;
@@ -706,7 +674,7 @@ static int msp_command(struct i2c_client *client, unsigned int cmd, void *arg)
706 if (state->radio) 674 if (state->radio)
707 break; 675 break;
708 if (state->opmode == OPMODE_AUTOSELECT) 676 if (state->opmode == OPMODE_AUTOSELECT)
709 msp_any_detect_stereo(client); 677 msp_detect_stereo(client);
710 va->mode = msp_mode_v4l2_to_v4l1(state->rxsubchans); 678 va->mode = msp_mode_v4l2_to_v4l1(state->rxsubchans);
711 break; 679 break;
712 } 680 }
@@ -722,8 +690,9 @@ static int msp_command(struct i2c_client *client, unsigned int cmd, void *arg)
722 state->treble = va->treble; 690 state->treble = va->treble;
723 msp_set_audio(client); 691 msp_set_audio(client);
724 692
725 if (va->mode != 0 && state->radio == 0) 693 if (va->mode != 0 && state->radio == 0) {
726 msp_any_set_audmode(client, msp_mode_v4l1_to_v4l2(va->mode)); 694 state->audmode = msp_mode_v4l1_to_v4l2(va->mode);
695 }
727 break; 696 break;
728 } 697 }
729 698
@@ -831,11 +800,8 @@ static int msp_command(struct i2c_client *client, unsigned int cmd, void *arg)
831 return -EINVAL; 800 return -EINVAL;
832 } 801 }
833 802
834 msp_any_detect_stereo(client); 803 a->capability = V4L2_AUDCAP_STEREO;
835 if (state->audmode == V4L2_TUNER_MODE_STEREO) { 804 a->mode = 0; /* TODO: add support for AVL */
836 a->capability = V4L2_AUDCAP_STEREO;
837 }
838
839 break; 805 break;
840 } 806 }
841 807
@@ -865,16 +831,10 @@ static int msp_command(struct i2c_client *client, unsigned int cmd, void *arg)
865 } 831 }
866 if (scart) { 832 if (scart) {
867 state->rxsubchans = V4L2_TUNER_SUB_STEREO; 833 state->rxsubchans = V4L2_TUNER_SUB_STEREO;
868 state->audmode = V4L2_TUNER_MODE_STEREO;
869 msp_set_scart(client, scart, 0); 834 msp_set_scart(client, scart, 0);
870 msp_write_dsp(client, 0x000d, 0x1900); 835 msp_write_dsp(client, 0x000d, 0x1900);
871 } 836 }
872 if (sarg->capability == V4L2_AUDCAP_STEREO) { 837 msp_set_audmode(client);
873 state->audmode = V4L2_TUNER_MODE_STEREO;
874 } else {
875 state->audmode &= ~V4L2_TUNER_MODE_STEREO;
876 }
877 msp_any_set_audmode(client, state->audmode);
878 msp_wake_thread(client); 838 msp_wake_thread(client);
879 break; 839 break;
880 } 840 }
@@ -886,7 +846,7 @@ static int msp_command(struct i2c_client *client, unsigned int cmd, void *arg)
886 if (state->radio) 846 if (state->radio)
887 break; 847 break;
888 if (state->opmode == OPMODE_AUTOSELECT) 848 if (state->opmode == OPMODE_AUTOSELECT)
889 msp_any_detect_stereo(client); 849 msp_detect_stereo(client);
890 vt->audmode = state->audmode; 850 vt->audmode = state->audmode;
891 vt->rxsubchans = state->rxsubchans; 851 vt->rxsubchans = state->rxsubchans;
892 vt->capability = V4L2_TUNER_CAP_STEREO | 852 vt->capability = V4L2_TUNER_CAP_STEREO |
@@ -898,11 +858,11 @@ static int msp_command(struct i2c_client *client, unsigned int cmd, void *arg)
898 { 858 {
899 struct v4l2_tuner *vt = (struct v4l2_tuner *)arg; 859 struct v4l2_tuner *vt = (struct v4l2_tuner *)arg;
900 860
901 if (state->radio) 861 if (state->radio) /* TODO: add mono/stereo support for radio */
902 break; 862 break;
863 state->audmode = vt->audmode;
903 /* only set audmode */ 864 /* only set audmode */
904 if (vt->audmode != -1 && vt->audmode != 0) 865 msp_set_audmode(client);
905 msp_any_set_audmode(client, vt->audmode);
906 break; 866 break;
907 } 867 }
908 868
@@ -927,7 +887,6 @@ static int msp_command(struct i2c_client *client, unsigned int cmd, void *arg)
927 return -EINVAL; 887 return -EINVAL;
928 } 888 }
929 break; 889 break;
930
931 } 890 }
932 891
933 case VIDIOC_S_AUDOUT: 892 case VIDIOC_S_AUDOUT:
@@ -993,7 +952,7 @@ static int msp_command(struct i2c_client *client, unsigned int cmd, void *arg)
993 const char *p; 952 const char *p;
994 953
995 if (state->opmode == OPMODE_AUTOSELECT) 954 if (state->opmode == OPMODE_AUTOSELECT)
996 msp_any_detect_stereo(client); 955 msp_detect_stereo(client);
997 v4l_info(client, "%s rev1 = 0x%04x rev2 = 0x%04x\n", 956 v4l_info(client, "%s rev1 = 0x%04x rev2 = 0x%04x\n",
998 client->name, state->rev1, state->rev2); 957 client->name, state->rev1, state->rev2);
999 v4l_info(client, "Audio: volume %d%s\n", 958 v4l_info(client, "Audio: volume %d%s\n",
@@ -1094,6 +1053,7 @@ static int msp_attach(struct i2c_adapter *adapter, int address, int kind)
1094 1053
1095 memset(state, 0, sizeof(*state)); 1054 memset(state, 0, sizeof(*state));
1096 state->v4l2_std = V4L2_STD_NTSC; 1055 state->v4l2_std = V4L2_STD_NTSC;
1056 state->audmode = V4L2_TUNER_MODE_LANG1;
1097 state->volume = 58880; /* 0db gain */ 1057 state->volume = 58880; /* 0db gain */
1098 state->balance = 32768; /* 0db gain */ 1058 state->balance = 32768; /* 0db gain */
1099 state->bass = 32768; 1059 state->bass = 32768;
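The msp3400 driver hunks retire the msp_any_set_audmode()/msp_any_detect_stereo() wrappers: the ioctl handlers now just record the requested audio mode in state->audmode, and a single msp_set_audmode()/msp_detect_stereo() pair applies or probes it according to the operating mode. The general shape of that "cache the request, apply it in one place" pattern, with hypothetical names and no claim about the new helpers' exact signatures:

	struct demo_state {
		int audmode;			/* last mode requested by userspace */
	};

	static void demo_apply_audmode(struct demo_state *s)
	{
		/* the one place that programs the hardware from s->audmode */
	}

	static int demo_s_tuner(struct demo_state *s, int requested)
	{
		s->audmode = requested;		/* remember what was asked for   */
		demo_apply_audmode(s);		/* and apply it once, right here */
		return 0;
	}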
diff --git a/drivers/media/video/msp3400-kthreads.c b/drivers/media/video/msp3400-kthreads.c
index 2072c3efebb3..852ab6a115fa 100644
--- a/drivers/media/video/msp3400-kthreads.c
+++ b/drivers/media/video/msp3400-kthreads.c
@@ -109,7 +109,7 @@ static struct msp3400c_init_data_dem {
109 {-2, -8, -10, 10, 50, 86}, 109 {-2, -8, -10, 10, 50, 86},
110 {-4, -12, -9, 23, 79, 126}, 110 {-4, -12, -9, 23, 79, 126},
111 MSP_CARRIER(6.5), MSP_CARRIER(6.5), 111 MSP_CARRIER(6.5), MSP_CARRIER(6.5),
112 0x00c6, 0x0140, 0x0120, 0x7c03 112 0x00c6, 0x0140, 0x0120, 0x7c00
113 }, 113 },
114}; 114};
115 115
@@ -154,54 +154,60 @@ const char *msp_standard_std_name(int std)
154 return "unknown"; 154 return "unknown";
155} 155}
156 156
157void msp3400c_setcarrier(struct i2c_client *client, int cdo1, int cdo2) 157static void msp_set_source(struct i2c_client *client, u16 src)
158{
159 struct msp_state *state = i2c_get_clientdata(client);
160
161 if (msp_dolby) {
162 msp_write_dsp(client, 0x0008, 0x0520); /* I2S1 */
163 msp_write_dsp(client, 0x0009, 0x0620); /* I2S2 */
164 } else {
165 msp_write_dsp(client, 0x0008, src);
166 msp_write_dsp(client, 0x0009, src);
167 }
168 msp_write_dsp(client, 0x000a, src);
169 msp_write_dsp(client, 0x000b, src);
170 msp_write_dsp(client, 0x000c, src);
171 if (state->has_scart23_in_scart2_out)
172 msp_write_dsp(client, 0x0041, src);
173}
174
175void msp3400c_set_carrier(struct i2c_client *client, int cdo1, int cdo2)
158{ 176{
159 msp_write_dem(client, 0x0093, cdo1 & 0xfff); 177 msp_write_dem(client, 0x0093, cdo1 & 0xfff);
160 msp_write_dem(client, 0x009b, cdo1 >> 12); 178 msp_write_dem(client, 0x009b, cdo1 >> 12);
161 msp_write_dem(client, 0x00a3, cdo2 & 0xfff); 179 msp_write_dem(client, 0x00a3, cdo2 & 0xfff);
162 msp_write_dem(client, 0x00ab, cdo2 >> 12); 180 msp_write_dem(client, 0x00ab, cdo2 >> 12);
163 msp_write_dem(client, 0x0056, 0); /*LOAD_REG_1/2*/ 181 msp_write_dem(client, 0x0056, 0); /* LOAD_REG_1/2 */
164} 182}
165 183
166void msp3400c_setmode(struct i2c_client *client, int type) 184void msp3400c_set_mode(struct i2c_client *client, int mode)
167{ 185{
168 struct msp_state *state = i2c_get_clientdata(client); 186 struct msp_state *state = i2c_get_clientdata(client);
187 struct msp3400c_init_data_dem *data = &msp3400c_init_data[mode];
169 int i; 188 int i;
170 189
171 v4l_dbg(1, msp_debug, client, "setmode: %d\n", type); 190 v4l_dbg(1, msp_debug, client, "set_mode: %d\n", mode);
172 state->mode = type; 191 state->mode = mode;
173 state->audmode = V4L2_TUNER_MODE_MONO;
174 state->rxsubchans = V4L2_TUNER_SUB_MONO; 192 state->rxsubchans = V4L2_TUNER_SUB_MONO;
175 193
176 msp_write_dem(client, 0x00bb, msp3400c_init_data[type].ad_cv); 194 msp_write_dem(client, 0x00bb, data->ad_cv);
177 195
178 for (i = 5; i >= 0; i--) /* fir 1 */ 196 for (i = 5; i >= 0; i--) /* fir 1 */
179 msp_write_dem(client, 0x0001, msp3400c_init_data[type].fir1[i]); 197 msp_write_dem(client, 0x0001, data->fir1[i]);
180 198
181 msp_write_dem(client, 0x0005, 0x0004); /* fir 2 */ 199 msp_write_dem(client, 0x0005, 0x0004); /* fir 2 */
182 msp_write_dem(client, 0x0005, 0x0040); 200 msp_write_dem(client, 0x0005, 0x0040);
183 msp_write_dem(client, 0x0005, 0x0000); 201 msp_write_dem(client, 0x0005, 0x0000);
184 for (i = 5; i >= 0; i--) 202 for (i = 5; i >= 0; i--)
185 msp_write_dem(client, 0x0005, msp3400c_init_data[type].fir2[i]); 203 msp_write_dem(client, 0x0005, data->fir2[i]);
186 204
187 msp_write_dem(client, 0x0083, msp3400c_init_data[type].mode_reg); 205 msp_write_dem(client, 0x0083, data->mode_reg);
188 206
189 msp3400c_setcarrier(client, msp3400c_init_data[type].cdo1, 207 msp3400c_set_carrier(client, data->cdo1, data->cdo2);
190 msp3400c_init_data[type].cdo2);
191 208
192 msp_write_dem(client, 0x0056, 0); /*LOAD_REG_1/2*/ 209 msp_set_source(client, data->dsp_src);
193 210 msp_write_dsp(client, 0x000e, data->dsp_matrix);
194 if (msp_dolby) {
195 msp_write_dsp(client, 0x0008, 0x0520); /* I2S1 */
196 msp_write_dsp(client, 0x0009, 0x0620); /* I2S2 */
197 msp_write_dsp(client, 0x000b, msp3400c_init_data[type].dsp_src);
198 } else {
199 msp_write_dsp(client, 0x0008, msp3400c_init_data[type].dsp_src);
200 msp_write_dsp(client, 0x0009, msp3400c_init_data[type].dsp_src);
201 msp_write_dsp(client, 0x000b, msp3400c_init_data[type].dsp_src);
202 }
203 msp_write_dsp(client, 0x000a, msp3400c_init_data[type].dsp_src);
204 msp_write_dsp(client, 0x000e, msp3400c_init_data[type].dsp_matrix);
205 211
206 if (state->has_nicam) { 212 if (state->has_nicam) {
207 /* nicam prescale */ 213 /* nicam prescale */
@@ -209,29 +215,31 @@ void msp3400c_setmode(struct i2c_client *client, int type)
209 } 215 }
210} 216}
211 217
212/* turn on/off nicam + stereo */ 218/* Set audio mode. Note that the pre-'G' models do not support BTSC+SAP,
213void msp3400c_setstereo(struct i2c_client *client, int mode) 219 nor do they support stereo BTSC. */
220static void msp3400c_set_audmode(struct i2c_client *client)
214{ 221{
215 static char *strmode[] = { "mono", "stereo", "lang2", "lang1" }; 222 static char *strmode[] = { "mono", "stereo", "lang2", "lang1" };
216 struct msp_state *state = i2c_get_clientdata(client); 223 struct msp_state *state = i2c_get_clientdata(client);
217 int nicam = 0; /* channel source: FM/AM or nicam */ 224 char *modestr = (state->audmode >= 0 && state->audmode < 4) ?
218 int src = 0; 225 strmode[state->audmode] : "unknown";
226 int src = 0; /* channel source: FM/AM, nicam or SCART */
219 227
220 if (state->opmode == OPMODE_AUTOSELECT) { 228 if (state->opmode == OPMODE_AUTOSELECT) {
221 /* this method would break everything, let's make sure 229 /* this method would break everything, let's make sure
222 * it's never called 230 * it's never called
223 */ 231 */
224 v4l_dbg(1, msp_debug, client, "setstereo called with mode=%d instead of set_source (ignored)\n", 232 v4l_dbg(1, msp_debug, client,
225 mode); 233 "set_audmode called with mode=%d instead of set_source (ignored)\n",
234 state->audmode);
226 return; 235 return;
227 } 236 }
228 237
229 /* switch demodulator */ 238 /* switch demodulator */
230 switch (state->mode) { 239 switch (state->mode) {
231 case MSP_MODE_FM_TERRA: 240 case MSP_MODE_FM_TERRA:
232 v4l_dbg(1, msp_debug, client, "FM setstereo: %s\n", strmode[mode]); 241 v4l_dbg(1, msp_debug, client, "FM set_audmode: %s\n", modestr);
233 msp3400c_setcarrier(client, state->second, state->main); 242 switch (state->audmode) {
234 switch (mode) {
235 case V4L2_TUNER_MODE_STEREO: 243 case V4L2_TUNER_MODE_STEREO:
236 msp_write_dsp(client, 0x000e, 0x3001); 244 msp_write_dsp(client, 0x000e, 0x3001);
237 break; 245 break;
@@ -243,50 +251,49 @@ void msp3400c_setstereo(struct i2c_client *client, int mode)
243 } 251 }
244 break; 252 break;
245 case MSP_MODE_FM_SAT: 253 case MSP_MODE_FM_SAT:
246 v4l_dbg(1, msp_debug, client, "SAT setstereo: %s\n", strmode[mode]); 254 v4l_dbg(1, msp_debug, client, "SAT set_audmode: %s\n", modestr);
247 switch (mode) { 255 switch (state->audmode) {
248 case V4L2_TUNER_MODE_MONO: 256 case V4L2_TUNER_MODE_MONO:
249 msp3400c_setcarrier(client, MSP_CARRIER(6.5), MSP_CARRIER(6.5)); 257 msp3400c_set_carrier(client, MSP_CARRIER(6.5), MSP_CARRIER(6.5));
250 break; 258 break;
251 case V4L2_TUNER_MODE_STEREO: 259 case V4L2_TUNER_MODE_STEREO:
252 msp3400c_setcarrier(client, MSP_CARRIER(7.2), MSP_CARRIER(7.02)); 260 msp3400c_set_carrier(client, MSP_CARRIER(7.2), MSP_CARRIER(7.02));
253 break; 261 break;
254 case V4L2_TUNER_MODE_LANG1: 262 case V4L2_TUNER_MODE_LANG1:
255 msp3400c_setcarrier(client, MSP_CARRIER(7.38), MSP_CARRIER(7.02)); 263 msp3400c_set_carrier(client, MSP_CARRIER(7.38), MSP_CARRIER(7.02));
256 break; 264 break;
257 case V4L2_TUNER_MODE_LANG2: 265 case V4L2_TUNER_MODE_LANG2:
258 msp3400c_setcarrier(client, MSP_CARRIER(7.38), MSP_CARRIER(7.02)); 266 msp3400c_set_carrier(client, MSP_CARRIER(7.38), MSP_CARRIER(7.02));
259 break; 267 break;
260 } 268 }
261 break; 269 break;
262 case MSP_MODE_FM_NICAM1: 270 case MSP_MODE_FM_NICAM1:
263 case MSP_MODE_FM_NICAM2: 271 case MSP_MODE_FM_NICAM2:
264 case MSP_MODE_AM_NICAM: 272 case MSP_MODE_AM_NICAM:
265 v4l_dbg(1, msp_debug, client, "NICAM setstereo: %s\n",strmode[mode]); 273 v4l_dbg(1, msp_debug, client, "NICAM set_audmode: %s\n",modestr);
266 msp3400c_setcarrier(client,state->second,state->main); 274 msp3400c_set_carrier(client, state->second, state->main);
267 if (state->nicam_on) 275 if (state->nicam_on)
268 nicam=0x0100; 276 src = 0x0100; /* NICAM */
269 break; 277 break;
270 case MSP_MODE_BTSC: 278 case MSP_MODE_BTSC:
271 v4l_dbg(1, msp_debug, client, "BTSC setstereo: %s\n",strmode[mode]); 279 v4l_dbg(1, msp_debug, client, "BTSC set_audmode: %s\n",modestr);
272 nicam=0x0300;
273 break; 280 break;
274 case MSP_MODE_EXTERN: 281 case MSP_MODE_EXTERN:
275 v4l_dbg(1, msp_debug, client, "extern setstereo: %s\n",strmode[mode]); 282 v4l_dbg(1, msp_debug, client, "extern set_audmode: %s\n",modestr);
276 nicam = 0x0200; 283 src = 0x0200; /* SCART */
277 break; 284 break;
278 case MSP_MODE_FM_RADIO: 285 case MSP_MODE_FM_RADIO:
279 v4l_dbg(1, msp_debug, client, "FM-Radio setstereo: %s\n",strmode[mode]); 286 v4l_dbg(1, msp_debug, client, "FM-Radio set_audmode: %s\n",modestr);
280 break; 287 break;
281 default: 288 default:
282 v4l_dbg(1, msp_debug, client, "mono setstereo\n"); 289 v4l_dbg(1, msp_debug, client, "mono set_audmode\n");
283 return; 290 return;
284 } 291 }
285 292
286 /* switch audio */ 293 /* switch audio */
287 switch (mode) { 294 switch (state->audmode) {
288 case V4L2_TUNER_MODE_STEREO: 295 case V4L2_TUNER_MODE_STEREO:
289 src = 0x0020 | nicam; 296 src |= 0x0020;
290 break; 297 break;
291 case V4L2_TUNER_MODE_MONO: 298 case V4L2_TUNER_MODE_MONO:
292 if (state->mode == MSP_MODE_AM_NICAM) { 299 if (state->mode == MSP_MODE_AM_NICAM) {
@@ -297,29 +304,22 @@ void msp3400c_setstereo(struct i2c_client *client, int mode)
297 src = 0x0200; 304 src = 0x0200;
298 break; 305 break;
299 } 306 }
307 if (state->rxsubchans & V4L2_TUNER_SUB_STEREO)
308 src = 0x0030;
309 break;
300 case V4L2_TUNER_MODE_LANG1: 310 case V4L2_TUNER_MODE_LANG1:
301 src = 0x0000 | nicam; 311 /* switch to stereo for stereo transmission, otherwise
312 keep first language */
313 if (state->rxsubchans & V4L2_TUNER_SUB_STEREO)
314 src |= 0x0020;
302 break; 315 break;
303 case V4L2_TUNER_MODE_LANG2: 316 case V4L2_TUNER_MODE_LANG2:
304 src = 0x0010 | nicam; 317 src |= 0x0010;
305 break; 318 break;
306 } 319 }
307 v4l_dbg(1, msp_debug, client, "setstereo final source/matrix = 0x%x\n", src); 320 v4l_dbg(1, msp_debug, client, "set_audmode final source/matrix = 0x%x\n", src);
308 321
309 if (msp_dolby) { 322 msp_set_source(client, src);
310 msp_write_dsp(client, 0x0008, 0x0520);
311 msp_write_dsp(client, 0x0009, 0x0620);
312 msp_write_dsp(client, 0x000a, src);
313 msp_write_dsp(client, 0x000b, src);
314 } else {
315 msp_write_dsp(client, 0x0008, src);
316 msp_write_dsp(client, 0x0009, src);
317 msp_write_dsp(client, 0x000a, src);
318 msp_write_dsp(client, 0x000b, src);
319 msp_write_dsp(client, 0x000c, src);
320 if (state->has_scart23_in_scart2_out)
321 msp_write_dsp(client, 0x0041, src);
322 }
323} 323}
324 324
325static void msp3400c_print_mode(struct i2c_client *client) 325static void msp3400c_print_mode(struct i2c_client *client)
@@ -347,12 +347,12 @@ static void msp3400c_print_mode(struct i2c_client *client)
347 347
348/* ----------------------------------------------------------------------- */ 348/* ----------------------------------------------------------------------- */
349 349
350int autodetect_stereo(struct i2c_client *client) 350static int msp3400c_detect_stereo(struct i2c_client *client)
351{ 351{
352 struct msp_state *state = i2c_get_clientdata(client); 352 struct msp_state *state = i2c_get_clientdata(client);
353 int val; 353 int val;
354 int rxsubchans = state->rxsubchans; 354 int rxsubchans = state->rxsubchans;
355 int newnicam = state->nicam_on; 355 int newnicam = state->nicam_on;
356 int update = 0; 356 int update = 0;
357 357
358 switch (state->mode) { 358 switch (state->mode) {
@@ -362,7 +362,7 @@ int autodetect_stereo(struct i2c_client *client)
362 val -= 65536; 362 val -= 65536;
363 v4l_dbg(2, msp_debug, client, "stereo detect register: %d\n", val); 363 v4l_dbg(2, msp_debug, client, "stereo detect register: %d\n", val);
364 if (val > 4096) { 364 if (val > 4096) {
365 rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_MONO; 365 rxsubchans = V4L2_TUNER_SUB_STEREO;
366 } else if (val < -4096) { 366 } else if (val < -4096) {
367 rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; 367 rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
368 } else { 368 } else {
@@ -386,14 +386,11 @@ int autodetect_stereo(struct i2c_client *client)
386 break; 386 break;
387 case 1: 387 case 1:
388 case 9: 388 case 9:
389 rxsubchans = V4L2_TUNER_SUB_MONO 389 rxsubchans = V4L2_TUNER_SUB_MONO;
390 | V4L2_TUNER_SUB_LANG1;
391 break; 390 break;
392 case 2: 391 case 2:
393 case 10: 392 case 10:
394 rxsubchans = V4L2_TUNER_SUB_MONO 393 rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
395 | V4L2_TUNER_SUB_LANG1
396 | V4L2_TUNER_SUB_LANG2;
397 break; 394 break;
398 default: 395 default:
399 rxsubchans = V4L2_TUNER_SUB_MONO; 396 rxsubchans = V4L2_TUNER_SUB_MONO;
@@ -405,30 +402,17 @@ int autodetect_stereo(struct i2c_client *client)
405 rxsubchans = V4L2_TUNER_SUB_MONO; 402 rxsubchans = V4L2_TUNER_SUB_MONO;
406 } 403 }
407 break; 404 break;
408 case MSP_MODE_BTSC:
409 val = msp_read_dem(client, 0x200);
410 v4l_dbg(2, msp_debug, client, "status=0x%x (pri=%s, sec=%s, %s%s%s)\n",
411 val,
412 (val & 0x0002) ? "no" : "yes",
413 (val & 0x0004) ? "no" : "yes",
414 (val & 0x0040) ? "stereo" : "mono",
415 (val & 0x0080) ? ", nicam 2nd mono" : "",
416 (val & 0x0100) ? ", bilingual/SAP" : "");
417 rxsubchans = V4L2_TUNER_SUB_MONO;
418 if (val & 0x0040) rxsubchans |= V4L2_TUNER_SUB_STEREO;
419 if (val & 0x0100) rxsubchans |= V4L2_TUNER_SUB_LANG1;
420 break;
421 } 405 }
422 if (rxsubchans != state->rxsubchans) { 406 if (rxsubchans != state->rxsubchans) {
423 update = 1; 407 update = 1;
424 v4l_dbg(1, msp_debug, client, "watch: rxsubchans %d => %d\n", 408 v4l_dbg(1, msp_debug, client, "watch: rxsubchans %02x => %02x\n",
425 state->rxsubchans,rxsubchans); 409 state->rxsubchans, rxsubchans);
426 state->rxsubchans = rxsubchans; 410 state->rxsubchans = rxsubchans;
427 } 411 }
428 if (newnicam != state->nicam_on) { 412 if (newnicam != state->nicam_on) {
429 update = 1; 413 update = 1;
430 v4l_dbg(1, msp_debug, client, "watch: nicam %d => %d\n", 414 v4l_dbg(1, msp_debug, client, "watch: nicam %d => %d\n",
431 state->nicam_on,newnicam); 415 state->nicam_on, newnicam);
432 state->nicam_on = newnicam; 416 state->nicam_on = newnicam;
433 } 417 }
434 return update; 418 return update;
@@ -443,13 +427,8 @@ static void watch_stereo(struct i2c_client *client)
443{ 427{
444 struct msp_state *state = i2c_get_clientdata(client); 428 struct msp_state *state = i2c_get_clientdata(client);
445 429
446 if (autodetect_stereo(client)) { 430 if (msp3400c_detect_stereo(client)) {
447 if (state->rxsubchans & V4L2_TUNER_SUB_STEREO) 431 msp3400c_set_audmode(client);
448 msp3400c_setstereo(client, V4L2_TUNER_MODE_STEREO);
449 else if (state->rxsubchans & V4L2_TUNER_SUB_LANG1)
450 msp3400c_setstereo(client, V4L2_TUNER_MODE_LANG1);
451 else
452 msp3400c_setstereo(client, V4L2_TUNER_MODE_MONO);
453 } 432 }
454 433
455 if (msp_once) 434 if (msp_once)
@@ -461,7 +440,7 @@ int msp3400c_thread(void *data)
461 struct i2c_client *client = data; 440 struct i2c_client *client = data;
462 struct msp_state *state = i2c_get_clientdata(client); 441 struct msp_state *state = i2c_get_clientdata(client);
463 struct msp3400c_carrier_detect *cd; 442 struct msp3400c_carrier_detect *cd;
464 int count, max1,max2,val1,val2, val,this; 443 int count, max1, max2, val1, val2, val, this;
465 444
466 445
467 v4l_dbg(1, msp_debug, client, "msp3400 daemon started\n"); 446 v4l_dbg(1, msp_debug, client, "msp3400 daemon started\n");
@@ -471,7 +450,7 @@ int msp3400c_thread(void *data)
471 v4l_dbg(2, msp_debug, client, "msp3400 thread: wakeup\n"); 450 v4l_dbg(2, msp_debug, client, "msp3400 thread: wakeup\n");
472 451
473 restart: 452 restart:
474 v4l_dbg(1, msp_debug, client, "thread: restart scan\n"); 453 v4l_dbg(2, msp_debug, client, "thread: restart scan\n");
475 state->restart = 0; 454 state->restart = 0;
476 if (kthread_should_stop()) 455 if (kthread_should_stop())
477 break; 456 break;
@@ -485,13 +464,14 @@ int msp3400c_thread(void *data)
485 464
486 /* mute */ 465 /* mute */
487 msp_set_mute(client); 466 msp_set_mute(client);
488 msp3400c_setmode(client, MSP_MODE_AM_DETECT /* +1 */ ); 467 msp3400c_set_mode(client, MSP_MODE_AM_DETECT /* +1 */ );
489 val1 = val2 = 0; 468 val1 = val2 = 0;
490 max1 = max2 = -1; 469 max1 = max2 = -1;
491 state->watch_stereo = 0; 470 state->watch_stereo = 0;
471 state->nicam_on = 0;
492 472
493 /* some time for the tuner to sync */ 473 /* some time for the tuner to sync */
494 if (msp_sleep(state,200)) 474 if (msp_sleep(state, 200))
495 goto restart; 475 goto restart;
496 476
497 /* carrier detect pass #1 -- main carrier */ 477 /* carrier detect pass #1 -- main carrier */
@@ -506,7 +486,7 @@ int msp3400c_thread(void *data)
506 } 486 }
507 487
508 for (this = 0; this < count; this++) { 488 for (this = 0; this < count; this++) {
509 msp3400c_setcarrier(client, cd[this].cdo,cd[this].cdo); 489 msp3400c_set_carrier(client, cd[this].cdo, cd[this].cdo);
510 if (msp_sleep(state,100)) 490 if (msp_sleep(state,100))
511 goto restart; 491 goto restart;
512 val = msp_read_dsp(client, 0x1b); 492 val = msp_read_dsp(client, 0x1b);
@@ -542,7 +522,7 @@ int msp3400c_thread(void *data)
542 max2 = 0; 522 max2 = 0;
543 } 523 }
544 for (this = 0; this < count; this++) { 524 for (this = 0; this < count; this++) {
545 msp3400c_setcarrier(client, cd[this].cdo,cd[this].cdo); 525 msp3400c_set_carrier(client, cd[this].cdo, cd[this].cdo);
546 if (msp_sleep(state,100)) 526 if (msp_sleep(state,100))
547 goto restart; 527 goto restart;
548 val = msp_read_dsp(client, 0x1b); 528 val = msp_read_dsp(client, 0x1b);
@@ -554,22 +534,20 @@ int msp3400c_thread(void *data)
554 } 534 }
555 535
556 /* program the msp3400 according to the results */ 536 /* program the msp3400 according to the results */
557 state->main = msp3400c_carrier_detect_main[max1].cdo; 537 state->main = msp3400c_carrier_detect_main[max1].cdo;
558 switch (max1) { 538 switch (max1) {
559 case 1: /* 5.5 */ 539 case 1: /* 5.5 */
560 if (max2 == 0) { 540 if (max2 == 0) {
561 /* B/G FM-stereo */ 541 /* B/G FM-stereo */
562 state->second = msp3400c_carrier_detect_55[max2].cdo; 542 state->second = msp3400c_carrier_detect_55[max2].cdo;
563 msp3400c_setmode(client, MSP_MODE_FM_TERRA); 543 msp3400c_set_mode(client, MSP_MODE_FM_TERRA);
564 state->nicam_on = 0;
565 msp3400c_setstereo(client, V4L2_TUNER_MODE_MONO);
566 state->watch_stereo = 1; 544 state->watch_stereo = 1;
567 } else if (max2 == 1 && state->has_nicam) { 545 } else if (max2 == 1 && state->has_nicam) {
568 /* B/G NICAM */ 546 /* B/G NICAM */
569 state->second = msp3400c_carrier_detect_55[max2].cdo; 547 state->second = msp3400c_carrier_detect_55[max2].cdo;
570 msp3400c_setmode(client, MSP_MODE_FM_NICAM1); 548 msp3400c_set_mode(client, MSP_MODE_FM_NICAM1);
549 msp3400c_set_carrier(client, state->second, state->main);
571 state->nicam_on = 1; 550 state->nicam_on = 1;
572 msp3400c_setcarrier(client, state->second, state->main);
573 state->watch_stereo = 1; 551 state->watch_stereo = 1;
574 } else { 552 } else {
575 goto no_second; 553 goto no_second;
@@ -578,35 +556,31 @@ int msp3400c_thread(void *data)
578 case 2: /* 6.0 */ 556 case 2: /* 6.0 */
579 /* PAL I NICAM */ 557 /* PAL I NICAM */
580 state->second = MSP_CARRIER(6.552); 558 state->second = MSP_CARRIER(6.552);
581 msp3400c_setmode(client, MSP_MODE_FM_NICAM2); 559 msp3400c_set_mode(client, MSP_MODE_FM_NICAM2);
560 msp3400c_set_carrier(client, state->second, state->main);
582 state->nicam_on = 1; 561 state->nicam_on = 1;
583 msp3400c_setcarrier(client, state->second, state->main);
584 state->watch_stereo = 1; 562 state->watch_stereo = 1;
585 break; 563 break;
586 case 3: /* 6.5 */ 564 case 3: /* 6.5 */
587 if (max2 == 1 || max2 == 2) { 565 if (max2 == 1 || max2 == 2) {
588 /* D/K FM-stereo */ 566 /* D/K FM-stereo */
589 state->second = msp3400c_carrier_detect_65[max2].cdo; 567 state->second = msp3400c_carrier_detect_65[max2].cdo;
590 msp3400c_setmode(client, MSP_MODE_FM_TERRA); 568 msp3400c_set_mode(client, MSP_MODE_FM_TERRA);
591 state->nicam_on = 0;
592 msp3400c_setstereo(client, V4L2_TUNER_MODE_MONO);
593 state->watch_stereo = 1; 569 state->watch_stereo = 1;
594 } else if (max2 == 0 && (state->v4l2_std & V4L2_STD_SECAM)) { 570 } else if (max2 == 0 && (state->v4l2_std & V4L2_STD_SECAM)) {
595 /* L NICAM or AM-mono */ 571 /* L NICAM or AM-mono */
596 state->second = msp3400c_carrier_detect_65[max2].cdo; 572 state->second = msp3400c_carrier_detect_65[max2].cdo;
597 msp3400c_setmode(client, MSP_MODE_AM_NICAM); 573 msp3400c_set_mode(client, MSP_MODE_AM_NICAM);
598 state->nicam_on = 0; 574 msp3400c_set_carrier(client, state->second, state->main);
599 msp3400c_setstereo(client, V4L2_TUNER_MODE_MONO);
600 msp3400c_setcarrier(client, state->second, state->main);
601 /* volume prescale for SCART (AM mono input) */ 575 /* volume prescale for SCART (AM mono input) */
602 msp_write_dsp(client, 0x000d, 0x1900); 576 msp_write_dsp(client, 0x000d, 0x1900);
603 state->watch_stereo = 1; 577 state->watch_stereo = 1;
604 } else if (max2 == 0 && state->has_nicam) { 578 } else if (max2 == 0 && state->has_nicam) {
605 /* D/K NICAM */ 579 /* D/K NICAM */
606 state->second = msp3400c_carrier_detect_65[max2].cdo; 580 state->second = msp3400c_carrier_detect_65[max2].cdo;
607 msp3400c_setmode(client, MSP_MODE_FM_NICAM1); 581 msp3400c_set_mode(client, MSP_MODE_FM_NICAM1);
582 msp3400c_set_carrier(client, state->second, state->main);
608 state->nicam_on = 1; 583 state->nicam_on = 1;
609 msp3400c_setcarrier(client, state->second, state->main);
610 state->watch_stereo = 1; 584 state->watch_stereo = 1;
611 } else { 585 } else {
612 goto no_second; 586 goto no_second;
@@ -616,23 +590,25 @@ int msp3400c_thread(void *data)
616 default: 590 default:
617 no_second: 591 no_second:
618 state->second = msp3400c_carrier_detect_main[max1].cdo; 592 state->second = msp3400c_carrier_detect_main[max1].cdo;
619 msp3400c_setmode(client, MSP_MODE_FM_TERRA); 593 msp3400c_set_mode(client, MSP_MODE_FM_TERRA);
620 state->nicam_on = 0; 594 msp3400c_set_carrier(client, state->second, state->main);
621 msp3400c_setcarrier(client, state->second, state->main);
622 state->rxsubchans = V4L2_TUNER_SUB_MONO; 595 state->rxsubchans = V4L2_TUNER_SUB_MONO;
623 msp3400c_setstereo(client, V4L2_TUNER_MODE_MONO);
624 break; 596 break;
625 } 597 }
626 598
627 /* unmute */ 599 /* unmute */
628 msp_set_audio(client); 600 msp_set_audio(client);
601 msp3400c_set_audmode(client);
629 602
630 if (msp_debug) 603 if (msp_debug)
631 msp3400c_print_mode(client); 604 msp3400c_print_mode(client);
632 605
633 /* monitor tv audio mode */ 606 /* monitor tv audio mode, the first time don't wait
607 so long to get a quick stereo/bilingual result */
608 if (msp_sleep(state, 1000))
609 goto restart;
634 while (state->watch_stereo) { 610 while (state->watch_stereo) {
635 if (msp_sleep(state,5000)) 611 if (msp_sleep(state, 5000))
636 goto restart; 612 goto restart;
637 watch_stereo(client); 613 watch_stereo(client);
638 } 614 }
@@ -656,7 +632,7 @@ int msp3410d_thread(void *data)
656 v4l_dbg(2, msp_debug, client, "msp3410 thread: wakeup\n"); 632 v4l_dbg(2, msp_debug, client, "msp3410 thread: wakeup\n");
657 633
658 restart: 634 restart:
659 v4l_dbg(1, msp_debug, client, "thread: restart scan\n"); 635 v4l_dbg(2, msp_debug, client, "thread: restart scan\n");
660 state->restart = 0; 636 state->restart = 0;
661 if (kthread_should_stop()) 637 if (kthread_should_stop())
662 break; 638 break;
@@ -681,9 +657,10 @@ int msp3410d_thread(void *data)
681 else 657 else
682 std = (state->v4l2_std & V4L2_STD_NTSC) ? 0x20 : 1; 658 std = (state->v4l2_std & V4L2_STD_NTSC) ? 0x20 : 1;
683 state->watch_stereo = 0; 659 state->watch_stereo = 0;
660 state->nicam_on = 0;
684 661
685 if (msp_debug) 662 if (msp_debug)
686 v4l_dbg(1, msp_debug, client, "setting standard: %s (0x%04x)\n", 663 v4l_dbg(2, msp_debug, client, "setting standard: %s (0x%04x)\n",
687 msp_standard_std_name(std), std); 664 msp_standard_std_name(std), std);
688 665
689 if (std != 1) { 666 if (std != 1) {
@@ -700,7 +677,7 @@ int msp3410d_thread(void *data)
700 val = msp_read_dem(client, 0x7e); 677 val = msp_read_dem(client, 0x7e);
701 if (val < 0x07ff) 678 if (val < 0x07ff)
702 break; 679 break;
703 v4l_dbg(1, msp_debug, client, "detection still in progress\n"); 680 v4l_dbg(2, msp_debug, client, "detection still in progress\n");
704 } 681 }
705 } 682 }
706 for (i = 0; msp_stdlist[i].name != NULL; i++) 683 for (i = 0; msp_stdlist[i].name != NULL; i++)
@@ -739,48 +716,34 @@ int msp3410d_thread(void *data)
739 state->rxsubchans = V4L2_TUNER_SUB_STEREO; 716 state->rxsubchans = V4L2_TUNER_SUB_STEREO;
740 state->nicam_on = 1; 717 state->nicam_on = 1;
741 state->watch_stereo = 1; 718 state->watch_stereo = 1;
742 msp3400c_setstereo(client,V4L2_TUNER_MODE_STEREO);
743 break; 719 break;
744 case 0x0009: 720 case 0x0009:
745 state->mode = MSP_MODE_AM_NICAM; 721 state->mode = MSP_MODE_AM_NICAM;
746 state->rxsubchans = V4L2_TUNER_SUB_MONO; 722 state->rxsubchans = V4L2_TUNER_SUB_MONO;
747 state->nicam_on = 1; 723 state->nicam_on = 1;
748 msp3400c_setstereo(client,V4L2_TUNER_MODE_MONO);
749 state->watch_stereo = 1; 724 state->watch_stereo = 1;
750 break; 725 break;
751 case 0x0020: /* BTSC */ 726 case 0x0020: /* BTSC */
752 /* just turn on stereo */ 727 /* The pre-'G' models only have BTSC-mono */
753 state->mode = MSP_MODE_BTSC; 728 state->mode = MSP_MODE_BTSC;
754 state->rxsubchans = V4L2_TUNER_SUB_STEREO; 729 state->rxsubchans = V4L2_TUNER_SUB_MONO;
755 state->nicam_on = 0;
756 state->watch_stereo = 1;
757 msp3400c_setstereo(client,V4L2_TUNER_MODE_STEREO);
758 break; 730 break;
759 case 0x0040: /* FM radio */ 731 case 0x0040: /* FM radio */
760 state->mode = MSP_MODE_FM_RADIO; 732 state->mode = MSP_MODE_FM_RADIO;
761 state->rxsubchans = V4L2_TUNER_SUB_STEREO; 733 state->rxsubchans = V4L2_TUNER_SUB_STEREO;
762 state->audmode = V4L2_TUNER_MODE_STEREO;
763 state->nicam_on = 0;
764 state->watch_stereo = 0;
765 /* not needed in theory if we have radio, but 734 /* not needed in theory if we have radio, but
766 short programming enables carrier mute */ 735 short programming enables carrier mute */
767 msp3400c_setmode(client, MSP_MODE_FM_RADIO); 736 msp3400c_set_mode(client, MSP_MODE_FM_RADIO);
768 msp3400c_setcarrier(client, MSP_CARRIER(10.7), 737 msp3400c_set_carrier(client, MSP_CARRIER(10.7),
769 MSP_CARRIER(10.7)); 738 MSP_CARRIER(10.7));
770 /* scart routing */ 739 /* scart routing (this doesn't belong here I think) */
771 msp_set_scart(client,SCART_IN2,0); 740 msp_set_scart(client,SCART_IN2,0);
772 /* msp34xx does radio decoding */
773 msp_write_dsp(client, 0x08, 0x0020);
774 msp_write_dsp(client, 0x09, 0x0020);
775 msp_write_dsp(client, 0x0b, 0x0020);
776 break; 741 break;
777 case 0x0003: 742 case 0x0003:
778 case 0x0004: 743 case 0x0004:
779 case 0x0005: 744 case 0x0005:
780 state->mode = MSP_MODE_FM_TERRA; 745 state->mode = MSP_MODE_FM_TERRA;
781 state->rxsubchans = V4L2_TUNER_SUB_MONO; 746 state->rxsubchans = V4L2_TUNER_SUB_MONO;
782 state->audmode = V4L2_TUNER_MODE_MONO;
783 state->nicam_on = 0;
784 state->watch_stereo = 1; 747 state->watch_stereo = 1;
785 break; 748 break;
786 } 749 }
@@ -791,11 +754,16 @@ int msp3410d_thread(void *data)
791 if (state->has_i2s_conf) 754 if (state->has_i2s_conf)
792 msp_write_dem(client, 0x40, state->i2s_mode); 755 msp_write_dem(client, 0x40, state->i2s_mode);
793 756
794 /* monitor tv audio mode */ 757 msp3400c_set_audmode(client);
758
759 /* monitor tv audio mode, the first time don't wait
760 so long to get a quick stereo/bilingual result */
761 if (msp_sleep(state, 1000))
762 goto restart;
795 while (state->watch_stereo) { 763 while (state->watch_stereo) {
796 if (msp_sleep(state,5000))
797 goto restart;
798 watch_stereo(client); 764 watch_stereo(client);
765 if (msp_sleep(state, 5000))
766 goto restart;
799 } 767 }
800 } 768 }
801 v4l_dbg(1, msp_debug, client, "thread: exit\n"); 769 v4l_dbg(1, msp_debug, client, "thread: exit\n");
@@ -813,7 +781,7 @@ int msp3410d_thread(void *data)
813 * the value for source is the same as bit 15:8 of DSP registers 0x08, 781 * the value for source is the same as bit 15:8 of DSP registers 0x08,
814 * 0x0a and 0x0c: 0=mono, 1=stereo or A|B, 2=SCART, 3=stereo or A, 4=stereo or B 782 * 0x0a and 0x0c: 0=mono, 1=stereo or A|B, 2=SCART, 3=stereo or A, 4=stereo or B
815 * 783 *
816 * this function replaces msp3400c_setstereo 784 * this function replaces msp3400c_set_audmode
817 */ 785 */
818static void msp34xxg_set_source(struct i2c_client *client, int source) 786static void msp34xxg_set_source(struct i2c_client *client, int source)
819{ 787{
@@ -826,12 +794,7 @@ static void msp34xxg_set_source(struct i2c_client *client, int source)
826 int value = (source & 0x07) << 8 | (source == 0 ? 0x30 : 0x20); 794 int value = (source & 0x07) << 8 | (source == 0 ? 0x30 : 0x20);
827 795
828 v4l_dbg(1, msp_debug, client, "set source to %d (0x%x)\n", source, value); 796 v4l_dbg(1, msp_debug, client, "set source to %d (0x%x)\n", source, value);
829 /* Loudspeaker Output */ 797 msp_set_source(client, value);
830 msp_write_dsp(client, 0x08, value);
831 /* SCART1 DA Output */
832 msp_write_dsp(client, 0x0a, value);
833 /* Quasi-peak detector */
834 msp_write_dsp(client, 0x0c, value);
835 /* 798 /*
836 * set identification threshold. Personally, I 799 * set identification threshold. Personally, I
837 * I set it to a higher value that the default 800 * I set it to a higher value that the default
@@ -948,13 +911,14 @@ int msp34xxg_thread(void *data)
948 if (msp_write_dsp(client, 0x13, state->acb)) 911 if (msp_write_dsp(client, 0x13, state->acb))
949 return -1; 912 return -1;
950 913
951 msp_write_dem(client, 0x40, state->i2s_mode); 914 if (state->has_i2s_conf)
915 msp_write_dem(client, 0x40, state->i2s_mode);
952 } 916 }
953 v4l_dbg(1, msp_debug, client, "thread: exit\n"); 917 v4l_dbg(1, msp_debug, client, "thread: exit\n");
954 return 0; 918 return 0;
955} 919}
956 920
957void msp34xxg_detect_stereo(struct i2c_client *client) 921static void msp34xxg_detect_stereo(struct i2c_client *client)
958{ 922{
959 struct msp_state *state = i2c_get_clientdata(client); 923 struct msp_state *state = i2c_get_clientdata(client);
960 924
@@ -964,11 +928,11 @@ void msp34xxg_detect_stereo(struct i2c_client *client)
964 928
965 state->rxsubchans = 0; 929 state->rxsubchans = 0;
966 if (is_stereo) 930 if (is_stereo)
967 state->rxsubchans |= V4L2_TUNER_SUB_STEREO; 931 state->rxsubchans = V4L2_TUNER_SUB_STEREO;
968 else 932 else
969 state->rxsubchans |= V4L2_TUNER_SUB_MONO; 933 state->rxsubchans = V4L2_TUNER_SUB_MONO;
970 if (is_bilingual) { 934 if (is_bilingual) {
971 state->rxsubchans |= V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; 935 state->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
972 /* I'm supposed to check whether it's SAP or not 936 /* I'm supposed to check whether it's SAP or not
973 * and set only LANG2/SAP in this case. Yet, the MSP 937 * and set only LANG2/SAP in this case. Yet, the MSP
974 * does a lot of work to hide this and handle everything 938 * does a lot of work to hide this and handle everything
@@ -980,12 +944,12 @@ void msp34xxg_detect_stereo(struct i2c_client *client)
980 status, is_stereo, is_bilingual, state->rxsubchans); 944 status, is_stereo, is_bilingual, state->rxsubchans);
981} 945}
982 946
983void msp34xxg_set_audmode(struct i2c_client *client, int audmode) 947static void msp34xxg_set_audmode(struct i2c_client *client)
984{ 948{
985 struct msp_state *state = i2c_get_clientdata(client); 949 struct msp_state *state = i2c_get_clientdata(client);
986 int source; 950 int source;
987 951
988 switch (audmode) { 952 switch (state->audmode) {
989 case V4L2_TUNER_MODE_MONO: 953 case V4L2_TUNER_MODE_MONO:
990 source = 0; /* mono only */ 954 source = 0; /* mono only */
991 break; 955 break;
@@ -1000,11 +964,40 @@ void msp34xxg_set_audmode(struct i2c_client *client, int audmode)
1000 source = 4; /* stereo or B */ 964 source = 4; /* stereo or B */
1001 break; 965 break;
1002 default: 966 default:
1003 audmode = 0;
1004 source = 1; 967 source = 1;
1005 break; 968 break;
1006 } 969 }
1007 state->audmode = audmode;
1008 msp34xxg_set_source(client, source); 970 msp34xxg_set_source(client, source);
1009} 971}
1010 972
973void msp_set_audmode(struct i2c_client *client)
974{
975 struct msp_state *state = i2c_get_clientdata(client);
976
977 switch (state->opmode) {
978 case OPMODE_MANUAL:
979 case OPMODE_AUTODETECT:
980 state->watch_stereo = 0;
981 msp3400c_set_audmode(client);
982 break;
983 case OPMODE_AUTOSELECT:
984 msp34xxg_set_audmode(client);
985 break;
986 }
987}
988
989void msp_detect_stereo(struct i2c_client *client)
990{
991 struct msp_state *state = i2c_get_clientdata(client);
992
993 switch (state->opmode) {
994 case OPMODE_MANUAL:
995 case OPMODE_AUTODETECT:
996 msp3400c_detect_stereo(client);
997 break;
998 case OPMODE_AUTOSELECT:
999 msp34xxg_detect_stereo(client);
1000 break;
1001 }
1002}
1003
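For reference while reading the msp34xxg hunks above: msp34xxg_set_source() folds the source selector into bits 15:8 of a single register value, with the low byte set to 0x30 for mono and 0x20 otherwise (see the "source" comment block in that hunk). The sketch below is not part of the patch; it is plain user-space C that merely reproduces that expression so the resulting values can be sanity-checked. The function name msp34xxg_source_value() and the names[] table are invented for the illustration.

#include <stdio.h>

/* Same expression as in msp34xxg_set_source() above:
 * bits 15:8 carry the source selector, the low byte is 0x30 for
 * mono (source 0) and 0x20 for everything else. */
static int msp34xxg_source_value(int source)
{
	return (source & 0x07) << 8 | (source == 0 ? 0x30 : 0x20);
}

int main(void)
{
	static const char *names[] = {
		"mono", "stereo or A|B", "SCART", "stereo or A", "stereo or B"
	};
	int source;

	for (source = 0; source <= 4; source++)
		printf("source %d (%s) -> 0x%04x\n",
		       source, names[source], msp34xxg_source_value(source));
	return 0;
}

Running it prints 0x0030, 0x0120, 0x0220, 0x0320 and 0x0420 for sources 0 through 4, matching the register layout described in the driver comment.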
diff --git a/drivers/media/video/msp3400.h b/drivers/media/video/msp3400.h
index a9ac57d0700b..6fb5c8c994e7 100644
--- a/drivers/media/video/msp3400.h
+++ b/drivers/media/video/msp3400.h
@@ -104,14 +104,12 @@ int msp_sleep(struct msp_state *state, int timeout);
104 104
105/* msp3400-kthreads.c */ 105/* msp3400-kthreads.c */
106const char *msp_standard_std_name(int std); 106const char *msp_standard_std_name(int std);
107void msp3400c_setcarrier(struct i2c_client *client, int cdo1, int cdo2); 107void msp_set_audmode(struct i2c_client *client);
108void msp3400c_setmode(struct i2c_client *client, int type); 108void msp_detect_stereo(struct i2c_client *client);
109void msp3400c_setstereo(struct i2c_client *client, int mode);
110int autodetect_stereo(struct i2c_client *client);
111int msp3400c_thread(void *data); 109int msp3400c_thread(void *data);
112int msp3410d_thread(void *data); 110int msp3410d_thread(void *data);
113int msp34xxg_thread(void *data); 111int msp34xxg_thread(void *data);
114void msp34xxg_detect_stereo(struct i2c_client *client); 112void msp3400c_set_mode(struct i2c_client *client, int mode);
115void msp34xxg_set_audmode(struct i2c_client *client, int audmode); 113void msp3400c_set_carrier(struct i2c_client *client, int cdo1, int cdo2);
116 114
117#endif /* MSP3400_H */ 115#endif /* MSP3400_H */
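A side note on the msp3400c_detect_stereo() hunk in msp3400-kthreads.c above: for MSP_MODE_FM_TERRA the DSP stereo-detect register is treated as a signed 16-bit quantity and compared against +/-4096 to choose between stereo, bilingual and mono. The sketch below is not taken from the driver; it is a small user-space C illustration of that decision. fm_terra_subchans() is an invented name and the SUB_* constants are stand-ins for the V4L2_TUNER_SUB_* flags.

#include <stdio.h>

#define SUB_MONO   0x01	/* stand-ins for the V4L2_TUNER_SUB_* flags */
#define SUB_STEREO 0x02
#define SUB_LANG1  0x08
#define SUB_LANG2  0x10

/* Mirrors the MSP_MODE_FM_TERRA branch: sign-extend the 16-bit read,
 * then a deviation beyond +/-4096 selects stereo or bilingual. */
static int fm_terra_subchans(int val)
{
	if (val > 32767)
		val -= 65536;
	if (val > 4096)
		return SUB_STEREO;
	if (val < -4096)
		return SUB_LANG1 | SUB_LANG2;
	return SUB_MONO;
}

int main(void)
{
	printf("0x2000 -> %#x\n", fm_terra_subchans(0x2000));	/* stereo */
	printf("0xe000 -> %#x\n", fm_terra_subchans(0xe000));	/* lang1|lang2 */
	printf("0x0100 -> %#x\n", fm_terra_subchans(0x0100));	/* mono */
	return 0;
}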
diff --git a/drivers/media/video/mxb.c b/drivers/media/video/mxb.c
index 41715cacf926..eb3b31867494 100644
--- a/drivers/media/video/mxb.c
+++ b/drivers/media/video/mxb.c
@@ -1,11 +1,11 @@
1/* 1/*
2 mxb - v4l2 driver for the Multimedia eXtension Board 2 mxb - v4l2 driver for the Multimedia eXtension Board
3 3
4 Copyright (C) 1998-2006 Michael Hunold <michael@mihu.de> 4 Copyright (C) 1998-2006 Michael Hunold <michael@mihu.de>
5 5
6 Visit http://www.mihu.de/linux/saa7146/mxb/ 6 Visit http://www.mihu.de/linux/saa7146/mxb/
7 for further details about this card. 7 for further details about this card.
8 8
9 This program is free software; you can redistribute it and/or modify 9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by 10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2 of the License, or 11 the Free Software Foundation; either version 2 of the License, or
@@ -35,12 +35,12 @@
35 35
36#define I2C_SAA7111 0x24 36#define I2C_SAA7111 0x24
37 37
38#define MXB_BOARD_CAN_DO_VBI(dev) (dev->revision != 0) 38#define MXB_BOARD_CAN_DO_VBI(dev) (dev->revision != 0)
39 39
40/* global variable */ 40/* global variable */
41static int mxb_num = 0; 41static int mxb_num = 0;
42 42
43/* initial frequency the tuner will be tuned to. 43/* initial frequency the tuner will be tuned to.
44 in verden (lower saxony, germany) 4148 is a 44 in verden (lower saxony, germany) 4148 is a
45 channel called "phoenix" */ 45 channel called "phoenix" */
46static int freq = 4148; 46static int freq = 4148;
@@ -55,7 +55,7 @@ MODULE_PARM_DESC(debug, "Turn on/off device debugging (default:off).");
55enum { TUNER, AUX1, AUX3, AUX3_YC }; 55enum { TUNER, AUX1, AUX3, AUX3_YC };
56 56
57static struct v4l2_input mxb_inputs[MXB_INPUTS] = { 57static struct v4l2_input mxb_inputs[MXB_INPUTS] = {
58 { TUNER, "Tuner", V4L2_INPUT_TYPE_TUNER, 1, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0 }, 58 { TUNER, "Tuner", V4L2_INPUT_TYPE_TUNER, 1, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0 },
59 { AUX1, "AUX1", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0 }, 59 { AUX1, "AUX1", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0 },
60 { AUX3, "AUX3 Composite", V4L2_INPUT_TYPE_CAMERA, 4, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0 }, 60 { AUX3, "AUX3 Composite", V4L2_INPUT_TYPE_CAMERA, 4, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0 },
61 { AUX3_YC, "AUX3 S-Video", V4L2_INPUT_TYPE_CAMERA, 4, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0 }, 61 { AUX3_YC, "AUX3 S-Video", V4L2_INPUT_TYPE_CAMERA, 4, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0 },
@@ -66,7 +66,7 @@ static struct v4l2_input mxb_inputs[MXB_INPUTS] = {
66static struct { 66static struct {
67 int hps_source; 67 int hps_source;
68 int hps_sync; 68 int hps_sync;
69} input_port_selection[MXB_INPUTS] = { 69} input_port_selection[MXB_INPUTS] = {
70 { SAA7146_HPS_SOURCE_PORT_A, SAA7146_HPS_SYNC_PORT_A }, 70 { SAA7146_HPS_SOURCE_PORT_A, SAA7146_HPS_SYNC_PORT_A },
71 { SAA7146_HPS_SOURCE_PORT_A, SAA7146_HPS_SYNC_PORT_A }, 71 { SAA7146_HPS_SOURCE_PORT_A, SAA7146_HPS_SYNC_PORT_A },
72 { SAA7146_HPS_SOURCE_PORT_A, SAA7146_HPS_SYNC_PORT_A }, 72 { SAA7146_HPS_SOURCE_PORT_A, SAA7146_HPS_SYNC_PORT_A },
@@ -81,7 +81,7 @@ static int video_audio_connect[MXB_INPUTS] =
81/* these are the necessary input-output-pins for bringing one audio source 81/* these are the necessary input-output-pins for bringing one audio source
82(see above) to the CD-output */ 82(see above) to the CD-output */
83static struct tea6420_multiplex TEA6420_cd[MXB_AUDIOS+1][2] = 83static struct tea6420_multiplex TEA6420_cd[MXB_AUDIOS+1][2] =
84 { 84 {
85 {{1,1,0},{1,1,0}}, /* Tuner */ 85 {{1,1,0},{1,1,0}}, /* Tuner */
86 {{5,1,0},{6,1,0}}, /* AUX 1 */ 86 {{5,1,0},{6,1,0}}, /* AUX 1 */
87 {{4,1,0},{6,1,0}}, /* AUX 2 */ 87 {{4,1,0},{6,1,0}}, /* AUX 2 */
@@ -122,8 +122,8 @@ static struct saa7146_extension_ioctls ioctls[] = {
122 { VIDIOC_S_FREQUENCY, SAA7146_EXCLUSIVE }, 122 { VIDIOC_S_FREQUENCY, SAA7146_EXCLUSIVE },
123 { VIDIOC_G_AUDIO, SAA7146_EXCLUSIVE }, 123 { VIDIOC_G_AUDIO, SAA7146_EXCLUSIVE },
124 { VIDIOC_S_AUDIO, SAA7146_EXCLUSIVE }, 124 { VIDIOC_S_AUDIO, SAA7146_EXCLUSIVE },
125 { MXB_S_AUDIO_CD, SAA7146_EXCLUSIVE }, /* custom control */ 125 { MXB_S_AUDIO_CD, SAA7146_EXCLUSIVE }, /* custom control */
126 { MXB_S_AUDIO_LINE, SAA7146_EXCLUSIVE }, /* custom control */ 126 { MXB_S_AUDIO_LINE, SAA7146_EXCLUSIVE }, /* custom control */
127 { 0, 0 } 127 { 0, 0 }
128}; 128};
129 129
@@ -132,7 +132,7 @@ struct mxb
132 struct video_device *video_dev; 132 struct video_device *video_dev;
133 struct video_device *vbi_dev; 133 struct video_device *vbi_dev;
134 134
135 struct i2c_adapter i2c_adapter; 135 struct i2c_adapter i2c_adapter;
136 136
137 struct i2c_client* saa7111a; 137 struct i2c_client* saa7111a;
138 struct i2c_client* tda9840; 138 struct i2c_client* tda9840;
@@ -200,15 +200,15 @@ static int mxb_probe(struct saa7146_dev* dev)
200 client = list_entry(item, struct i2c_client, list); 200 client = list_entry(item, struct i2c_client, list);
201 if( I2C_TEA6420_1 == client->addr ) 201 if( I2C_TEA6420_1 == client->addr )
202 mxb->tea6420_1 = client; 202 mxb->tea6420_1 = client;
203 if( I2C_TEA6420_2 == client->addr ) 203 if( I2C_TEA6420_2 == client->addr )
204 mxb->tea6420_2 = client; 204 mxb->tea6420_2 = client;
205 if( I2C_TEA6415C_2 == client->addr ) 205 if( I2C_TEA6415C_2 == client->addr )
206 mxb->tea6415c = client; 206 mxb->tea6415c = client;
207 if( I2C_TDA9840 == client->addr ) 207 if( I2C_TDA9840 == client->addr )
208 mxb->tda9840 = client; 208 mxb->tda9840 = client;
209 if( I2C_SAA7111 == client->addr ) 209 if( I2C_SAA7111 == client->addr )
210 mxb->saa7111a = client; 210 mxb->saa7111a = client;
211 if( 0x60 == client->addr ) 211 if( 0x60 == client->addr )
212 mxb->tuner = client; 212 mxb->tuner = client;
213 } 213 }
214 214
@@ -222,7 +222,7 @@ static int mxb_probe(struct saa7146_dev* dev)
222 return -ENODEV; 222 return -ENODEV;
223 } 223 }
224 224
225 /* all devices are present, probe was successful */ 225 /* all devices are present, probe was successful */
226 226
227 /* we store the pointer in our private data field */ 227 /* we store the pointer in our private data field */
228 dev->ext_priv = mxb; 228 dev->ext_priv = mxb;
@@ -230,7 +230,7 @@ static int mxb_probe(struct saa7146_dev* dev)
230 return 0; 230 return 0;
231} 231}
232 232
233/* some init data for the saa7740, the so-called 'sound arena module'. 233/* some init data for the saa7740, the so-called 'sound arena module'.
234 there are no specs available, so we simply use some init values */ 234 there are no specs available, so we simply use some init values */
235static struct { 235static struct {
236 int length; 236 int length;
@@ -330,7 +330,7 @@ static int mxb_init_done(struct saa7146_dev* dev)
330 v4l2_std_id std = V4L2_STD_PAL_BG; 330 v4l2_std_id std = V4L2_STD_PAL_BG;
331 331
332 int i = 0, err = 0; 332 int i = 0, err = 0;
333 struct tea6415c_multiplex vm; 333 struct tea6415c_multiplex vm;
334 334
335 /* select video mode in saa7111a */ 335 /* select video mode in saa7111a */
336 i = VIDEO_MODE_PAL; 336 i = VIDEO_MODE_PAL;
@@ -380,16 +380,16 @@ static int mxb_init_done(struct saa7146_dev* dev)
380 vm.in = 3; 380 vm.in = 3;
381 vm.out = 13; 381 vm.out = 13;
382 mxb->tea6415c->driver->command(mxb->tea6415c,TEA6415C_SWITCH, &vm); 382 mxb->tea6415c->driver->command(mxb->tea6415c,TEA6415C_SWITCH, &vm);
383 383
384 /* the rest for mxb */ 384 /* the rest for mxb */
385 mxb->cur_input = 0; 385 mxb->cur_input = 0;
386 mxb->cur_mute = 1; 386 mxb->cur_mute = 1;
387 387
388 mxb->cur_mode = V4L2_TUNER_MODE_STEREO; 388 mxb->cur_mode = V4L2_TUNER_MODE_STEREO;
389 mxb->tda9840->driver->command(mxb->tda9840, TDA9840_SWITCH, &mxb->cur_mode); 389 mxb->tda9840->driver->command(mxb->tda9840, TDA9840_SWITCH, &mxb->cur_mode);
390 390
391 /* check if the saa7740 (aka 'sound arena module') is present 391 /* check if the saa7740 (aka 'sound arena module') is present
392 on the mxb. if so, we must initialize it. due to lack of 392 on the mxb. if so, we must initialize it. due to lack of
393 information about the saa7740, the values were reverse 393 information about the saa7740, the values were reverse
394 engineered. */ 394 engineered. */
395 msg.addr = 0x1b; 395 msg.addr = 0x1b;
@@ -409,7 +409,7 @@ static int mxb_init_done(struct saa7146_dev* dev)
409 break; 409 break;
410 } 410 }
411 411
412 msg.len = mxb_saa7740_init[i].length; 412 msg.len = mxb_saa7740_init[i].length;
413 msg.buf = &mxb_saa7740_init[i].data[0]; 413 msg.buf = &mxb_saa7740_init[i].data[0];
414 if( 1 != (err = i2c_transfer(&mxb->i2c_adapter, &msg, 1))) { 414 if( 1 != (err = i2c_transfer(&mxb->i2c_adapter, &msg, 1))) {
415 DEB_D(("failed to initialize 'sound arena module'.\n")); 415 DEB_D(("failed to initialize 'sound arena module'.\n"));
@@ -418,12 +418,12 @@ static int mxb_init_done(struct saa7146_dev* dev)
418 } 418 }
419 INFO(("'sound arena module' detected.\n")); 419 INFO(("'sound arena module' detected.\n"));
420 } 420 }
421err: 421err:
422 /* the rest for saa7146: you should definitely set some basic values 422 /* the rest for saa7146: you should definitely set some basic values
423 for the input-port handling of the saa7146. */ 423 for the input-port handling of the saa7146. */
424 424
425 /* ext->saa has been filled by the core driver */ 425 /* ext->saa has been filled by the core driver */
426 426
427 /* some stuff is done via variables */ 427 /* some stuff is done via variables */
428 saa7146_set_hps_source_and_sync(dev, input_port_selection[mxb->cur_input].hps_source, input_port_selection[mxb->cur_input].hps_sync); 428 saa7146_set_hps_source_and_sync(dev, input_port_selection[mxb->cur_input].hps_source, input_port_selection[mxb->cur_input].hps_sync);
429 429
@@ -431,7 +431,7 @@ err:
431 431
432 /* this is ugly, but because of the fact that this is completely 432 /* this is ugly, but because of the fact that this is completely
433 hardware dependent, it should be done directly... */ 433 hardware dependent, it should be done directly... */
434 saa7146_write(dev, DD1_STREAM_B, 0x00000000); 434 saa7146_write(dev, DD1_STREAM_B, 0x00000000);
435 saa7146_write(dev, DD1_INIT, 0x02000200); 435 saa7146_write(dev, DD1_INIT, 0x02000200);
436 saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26)); 436 saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));
437 437
@@ -453,7 +453,7 @@ static struct saa7146_ext_vv vv_data;
453static int mxb_attach(struct saa7146_dev* dev, struct saa7146_pci_extension_data *info) 453static int mxb_attach(struct saa7146_dev* dev, struct saa7146_pci_extension_data *info)
454{ 454{
455 struct mxb* mxb = (struct mxb*)dev->ext_priv; 455 struct mxb* mxb = (struct mxb*)dev->ext_priv;
456 456
457 DEB_EE(("dev:%p\n",dev)); 457 DEB_EE(("dev:%p\n",dev));
458 458
459 /* checking for i2c-devices can be omitted here, because we 459 /* checking for i2c-devices can be omitted here, because we
@@ -464,7 +464,7 @@ static int mxb_attach(struct saa7146_dev* dev, struct saa7146_pci_extension_data
464 ERR(("cannot register capture v4l2 device. skipping.\n")); 464 ERR(("cannot register capture v4l2 device. skipping.\n"));
465 return -1; 465 return -1;
466 } 466 }
467 467
468 /* initialization stuff (vbi) (only for revision > 0 and for extensions which want it)*/ 468 /* initialization stuff (vbi) (only for revision > 0 and for extensions which want it)*/
469 if( 0 != MXB_BOARD_CAN_DO_VBI(dev)) { 469 if( 0 != MXB_BOARD_CAN_DO_VBI(dev)) {
470 if( 0 != saa7146_register_device(&mxb->vbi_dev, dev, "mxb", VFL_TYPE_VBI)) { 470 if( 0 != saa7146_register_device(&mxb->vbi_dev, dev, "mxb", VFL_TYPE_VBI)) {
@@ -513,17 +513,17 @@ static int mxb_detach(struct saa7146_dev* dev)
513 return 0; 513 return 0;
514} 514}
515 515
516static int mxb_ioctl(struct saa7146_fh *fh, unsigned int cmd, void *arg) 516static int mxb_ioctl(struct saa7146_fh *fh, unsigned int cmd, void *arg)
517{ 517{
518 struct saa7146_dev *dev = fh->dev; 518 struct saa7146_dev *dev = fh->dev;
519 struct mxb* mxb = (struct mxb*)dev->ext_priv; 519 struct mxb* mxb = (struct mxb*)dev->ext_priv;
520 struct saa7146_vv *vv = dev->vv_data; 520 struct saa7146_vv *vv = dev->vv_data;
521 521
522 switch(cmd) { 522 switch(cmd) {
523 case VIDIOC_ENUMINPUT: 523 case VIDIOC_ENUMINPUT:
524 { 524 {
525 struct v4l2_input *i = arg; 525 struct v4l2_input *i = arg;
526 526
527 DEB_EE(("VIDIOC_ENUMINPUT %d.\n",i->index)); 527 DEB_EE(("VIDIOC_ENUMINPUT %d.\n",i->index));
528 if( i->index < 0 || i->index >= MXB_INPUTS) { 528 if( i->index < 0 || i->index >= MXB_INPUTS) {
529 return -EINVAL; 529 return -EINVAL;
@@ -559,11 +559,11 @@ static int mxb_ioctl(struct saa7146_fh *fh, unsigned int cmd, void *arg)
559 break; 559 break;
560 } 560 }
561 } 561 }
562 562
563 if( i < 0 ) { 563 if( i < 0 ) {
564 return -EAGAIN; 564 return -EAGAIN;
565 } 565 }
566 566
567 switch (vc->id ) { 567 switch (vc->id ) {
568 case V4L2_CID_AUDIO_MUTE: { 568 case V4L2_CID_AUDIO_MUTE: {
569 vc->value = mxb->cur_mute; 569 vc->value = mxb->cur_mute;
@@ -571,7 +571,7 @@ static int mxb_ioctl(struct saa7146_fh *fh, unsigned int cmd, void *arg)
571 return 0; 571 return 0;
572 } 572 }
573 } 573 }
574 574
575 DEB_EE(("VIDIOC_G_CTRL V4L2_CID_AUDIO_MUTE:%d.\n",vc->value)); 575 DEB_EE(("VIDIOC_G_CTRL V4L2_CID_AUDIO_MUTE:%d.\n",vc->value));
576 return 0; 576 return 0;
577 } 577 }
@@ -580,17 +580,17 @@ static int mxb_ioctl(struct saa7146_fh *fh, unsigned int cmd, void *arg)
580 { 580 {
581 struct v4l2_control *vc = arg; 581 struct v4l2_control *vc = arg;
582 int i = 0; 582 int i = 0;
583 583
584 for (i = MAXCONTROLS - 1; i >= 0; i--) { 584 for (i = MAXCONTROLS - 1; i >= 0; i--) {
585 if (mxb_controls[i].id == vc->id) { 585 if (mxb_controls[i].id == vc->id) {
586 break; 586 break;
587 } 587 }
588 } 588 }
589 589
590 if( i < 0 ) { 590 if( i < 0 ) {
591 return -EAGAIN; 591 return -EAGAIN;
592 } 592 }
593 593
594 switch (vc->id ) { 594 switch (vc->id ) {
595 case V4L2_CID_AUDIO_MUTE: { 595 case V4L2_CID_AUDIO_MUTE: {
596 mxb->cur_mute = vc->value; 596 mxb->cur_mute = vc->value;
@@ -614,12 +614,12 @@ static int mxb_ioctl(struct saa7146_fh *fh, unsigned int cmd, void *arg)
614 *input = mxb->cur_input; 614 *input = mxb->cur_input;
615 615
616 DEB_EE(("VIDIOC_G_INPUT %d.\n",*input)); 616 DEB_EE(("VIDIOC_G_INPUT %d.\n",*input));
617 return 0; 617 return 0;
618 } 618 }
619 case VIDIOC_S_INPUT: 619 case VIDIOC_S_INPUT:
620 { 620 {
621 int input = *(int *)arg; 621 int input = *(int *)arg;
622 struct tea6415c_multiplex vm; 622 struct tea6415c_multiplex vm;
623 int i = 0; 623 int i = 0;
624 624
625 DEB_EE(("VIDIOC_S_INPUT %d.\n",input)); 625 DEB_EE(("VIDIOC_S_INPUT %d.\n",input));
@@ -627,34 +627,34 @@ static int mxb_ioctl(struct saa7146_fh *fh, unsigned int cmd, void *arg)
627 if (input < 0 || input >= MXB_INPUTS) { 627 if (input < 0 || input >= MXB_INPUTS) {
628 return -EINVAL; 628 return -EINVAL;
629 } 629 }
630 630
631 /* fixme: lock the input switching using the mutex 631 /* fixme: lock the input switching using the mutex
632 down(&dev->lock); 632 mutex_lock(&dev->lock);
633 video_mux(dev,*i); 633 video_mux(dev,*i);
634 up(&dev->lock); 634 mutex_unlock(&dev->lock);
635 */ 635 */
636 636
637 /* fixme: check if streaming capture 637 /* fixme: check if streaming capture
638 if ( 0 != dev->streaming ) { 638 if ( 0 != dev->streaming ) {
639 DEB_D(("VIDIOC_S_INPUT illegal while streaming.\n")); 639 DEB_D(("VIDIOC_S_INPUT illegal while streaming.\n"));
640 return -EPERM; 640 return -EPERM;
641 } 641 }
642 */ 642 */
643 643
644 mxb->cur_input = input; 644 mxb->cur_input = input;
645 645
646 saa7146_set_hps_source_and_sync(dev, input_port_selection[input].hps_source, input_port_selection[input].hps_sync); 646 saa7146_set_hps_source_and_sync(dev, input_port_selection[input].hps_source, input_port_selection[input].hps_sync);
647 647
648 /* prepare switching of tea6415c and saa7111a; 648 /* prepare switching of tea6415c and saa7111a;
649 have a look at the 'background'-file for further information */ 649 have a look at the 'background'-file for further information */
650 switch( input ) { 650 switch( input ) {
651 651
652 case TUNER: 652 case TUNER:
653 { 653 {
654 i = 0; 654 i = 0;
655 vm.in = 3; 655 vm.in = 3;
656 vm.out = 17; 656 vm.out = 17;
657 657
658 if ( 0 != mxb->tea6415c->driver->command(mxb->tea6415c,TEA6415C_SWITCH, &vm)) { 658 if ( 0 != mxb->tea6415c->driver->command(mxb->tea6415c,TEA6415C_SWITCH, &vm)) {
659 printk("VIDIOC_S_INPUT: could not address tea6415c #1\n"); 659 printk("VIDIOC_S_INPUT: could not address tea6415c #1\n");
660 return -EFAULT; 660 return -EFAULT;
@@ -662,7 +662,7 @@ static int mxb_ioctl(struct saa7146_fh *fh, unsigned int cmd, void *arg)
662 /* connect tuner-output always to multicable */ 662 /* connect tuner-output always to multicable */
663 vm.in = 3; 663 vm.in = 3;
664 vm.out = 13; 664 vm.out = 13;
665 break; 665 break;
666 } 666 }
667 case AUX3_YC: 667 case AUX3_YC:
668 { 668 {
@@ -703,11 +703,11 @@ static int mxb_ioctl(struct saa7146_fh *fh, unsigned int cmd, void *arg)
703 break; 703 break;
704 } 704 }
705 } 705 }
706 706
707 /* switch video in saa7111a */ 707 /* switch video in saa7111a */
708 if ( 0 != mxb->saa7111a->driver->command(mxb->saa7111a,DECODER_SET_INPUT, &i)) { 708 if ( 0 != mxb->saa7111a->driver->command(mxb->saa7111a,DECODER_SET_INPUT, &i)) {
709 printk("VIDIOC_S_INPUT: could not address saa7111a #1.\n"); 709 printk("VIDIOC_S_INPUT: could not address saa7111a #1.\n");
710 } 710 }
711 711
712 /* switch the audio-source only if necessary */ 712 /* switch the audio-source only if necessary */
713 if( 0 == mxb->cur_mute ) { 713 if( 0 == mxb->cur_mute ) {
@@ -738,11 +738,11 @@ static int mxb_ioctl(struct saa7146_fh *fh, unsigned int cmd, void *arg)
738 t->rangehigh = 13684; /* 855.25 MHz / 62.5 kHz = 13684 */ 738 t->rangehigh = 13684; /* 855.25 MHz / 62.5 kHz = 13684 */
739 /* FIXME: add the real signal strength here */ 739 /* FIXME: add the real signal strength here */
740 t->signal = 0xffff; 740 t->signal = 0xffff;
741 t->afc = 0; 741 t->afc = 0;
742 742
743 mxb->tda9840->driver->command(mxb->tda9840,TDA9840_DETECT, &byte); 743 mxb->tda9840->driver->command(mxb->tda9840,TDA9840_DETECT, &byte);
744 t->audmode = mxb->cur_mode; 744 t->audmode = mxb->cur_mode;
745 745
746 if( byte < 0 ) { 746 if( byte < 0 ) {
747 t->rxsubchans = V4L2_TUNER_SUB_MONO; 747 t->rxsubchans = V4L2_TUNER_SUB_MONO;
748 } else { 748 } else {
@@ -777,12 +777,12 @@ static int mxb_ioctl(struct saa7146_fh *fh, unsigned int cmd, void *arg)
777 struct v4l2_tuner *t = arg; 777 struct v4l2_tuner *t = arg;
778 int result = 0; 778 int result = 0;
779 int byte = 0; 779 int byte = 0;
780 780
781 if( 0 != t->index ) { 781 if( 0 != t->index ) {
782 DEB_D(("VIDIOC_S_TUNER: channel %d does not have a tuner attached.\n",t->index)); 782 DEB_D(("VIDIOC_S_TUNER: channel %d does not have a tuner attached.\n",t->index));
783 return -EINVAL; 783 return -EINVAL;
784 } 784 }
785 785
786 switch(t->audmode) { 786 switch(t->audmode) {
787 case V4L2_TUNER_MODE_STEREO: { 787 case V4L2_TUNER_MODE_STEREO: {
788 mxb->cur_mode = V4L2_TUNER_MODE_STEREO; 788 mxb->cur_mode = V4L2_TUNER_MODE_STEREO;
@@ -813,7 +813,7 @@ static int mxb_ioctl(struct saa7146_fh *fh, unsigned int cmd, void *arg)
813 if( 0 != (result = mxb->tda9840->driver->command(mxb->tda9840, TDA9840_SWITCH, &byte))) { 813 if( 0 != (result = mxb->tda9840->driver->command(mxb->tda9840, TDA9840_SWITCH, &byte))) {
814 printk("VIDIOC_S_TUNER error. result:%d, byte:%d\n",result,byte); 814 printk("VIDIOC_S_TUNER error. result:%d, byte:%d\n",result,byte);
815 } 815 }
816 816
817 return 0; 817 return 0;
818 } 818 }
819 case VIDIOC_G_FREQUENCY: 819 case VIDIOC_G_FREQUENCY:
@@ -839,7 +839,7 @@ static int mxb_ioctl(struct saa7146_fh *fh, unsigned int cmd, void *arg)
839 839
840 if (V4L2_TUNER_ANALOG_TV != f->type) 840 if (V4L2_TUNER_ANALOG_TV != f->type)
841 return -EINVAL; 841 return -EINVAL;
842 842
843 if(0 != mxb->cur_input) { 843 if(0 != mxb->cur_input) {
844 DEB_D(("VIDIOC_S_FREQ: channel %d does not have a tuner!\n",mxb->cur_input)); 844 DEB_D(("VIDIOC_S_FREQ: channel %d does not have a tuner!\n",mxb->cur_input));
845 return -EINVAL; 845 return -EINVAL;
@@ -848,7 +848,7 @@ static int mxb_ioctl(struct saa7146_fh *fh, unsigned int cmd, void *arg)
848 mxb->cur_freq = *f; 848 mxb->cur_freq = *f;
849 DEB_EE(("VIDIOC_S_FREQUENCY: freq:0x%08x.\n", mxb->cur_freq.frequency)); 849 DEB_EE(("VIDIOC_S_FREQUENCY: freq:0x%08x.\n", mxb->cur_freq.frequency));
850 850
851 /* tune in desired frequency */ 851 /* tune in desired frequency */
852 mxb->tuner->driver->command(mxb->tuner, VIDIOC_S_FREQUENCY, &mxb->cur_freq); 852 mxb->tuner->driver->command(mxb->tuner, VIDIOC_S_FREQUENCY, &mxb->cur_freq);
853 853
854 /* hack: changing the frequency should invalidate the vbi-counter (=> alevt) */ 854 /* hack: changing the frequency should invalidate the vbi-counter (=> alevt) */
@@ -861,12 +861,12 @@ static int mxb_ioctl(struct saa7146_fh *fh, unsigned int cmd, void *arg)
861 case MXB_S_AUDIO_CD: 861 case MXB_S_AUDIO_CD:
862 { 862 {
863 int i = *(int*)arg; 863 int i = *(int*)arg;
864 864
865 if( i < 0 || i >= MXB_AUDIOS ) { 865 if( i < 0 || i >= MXB_AUDIOS ) {
866 DEB_D(("illegal argument to MXB_S_AUDIO_CD: i:%d.\n",i)); 866 DEB_D(("illegal argument to MXB_S_AUDIO_CD: i:%d.\n",i));
867 return -EINVAL; 867 return -EINVAL;
868 } 868 }
869 869
870 DEB_EE(("MXB_S_AUDIO_CD: i:%d.\n",i)); 870 DEB_EE(("MXB_S_AUDIO_CD: i:%d.\n",i));
871 871
872 mxb->tea6420_1->driver->command(mxb->tea6420_1,TEA6420_SWITCH, &TEA6420_cd[i][0]); 872 mxb->tea6420_1->driver->command(mxb->tea6420_1,TEA6420_SWITCH, &TEA6420_cd[i][0]);
@@ -877,12 +877,12 @@ static int mxb_ioctl(struct saa7146_fh *fh, unsigned int cmd, void *arg)
877 case MXB_S_AUDIO_LINE: 877 case MXB_S_AUDIO_LINE:
878 { 878 {
879 int i = *(int*)arg; 879 int i = *(int*)arg;
880 880
881 if( i < 0 || i >= MXB_AUDIOS ) { 881 if( i < 0 || i >= MXB_AUDIOS ) {
882 DEB_D(("illegal argument to MXB_S_AUDIO_LINE: i:%d.\n",i)); 882 DEB_D(("illegal argument to MXB_S_AUDIO_LINE: i:%d.\n",i));
883 return -EINVAL; 883 return -EINVAL;
884 } 884 }
885 885
886 DEB_EE(("MXB_S_AUDIO_LINE: i:%d.\n",i)); 886 DEB_EE(("MXB_S_AUDIO_LINE: i:%d.\n",i));
887 mxb->tea6420_1->driver->command(mxb->tea6420_1,TEA6420_SWITCH, &TEA6420_line[i][0]); 887 mxb->tea6420_1->driver->command(mxb->tea6420_1,TEA6420_SWITCH, &TEA6420_line[i][0]);
888 mxb->tea6420_2->driver->command(mxb->tea6420_2,TEA6420_SWITCH, &TEA6420_line[i][1]); 888 mxb->tea6420_2->driver->command(mxb->tea6420_2,TEA6420_SWITCH, &TEA6420_line[i][1]);
@@ -894,13 +894,13 @@ static int mxb_ioctl(struct saa7146_fh *fh, unsigned int cmd, void *arg)
894 struct v4l2_audio *a = arg; 894 struct v4l2_audio *a = arg;
895 895
896 if( a->index < 0 || a->index > MXB_INPUTS ) { 896 if( a->index < 0 || a->index > MXB_INPUTS ) {
897 DEB_D(("VIDIOC_G_AUDIO %d out of range.\n",a->index)); 897 DEB_D(("VIDIOC_G_AUDIO %d out of range.\n",a->index));
898 return -EINVAL; 898 return -EINVAL;
899 } 899 }
900 900
901 DEB_EE(("VIDIOC_G_AUDIO %d.\n",a->index)); 901 DEB_EE(("VIDIOC_G_AUDIO %d.\n",a->index));
902 memcpy(a, &mxb_audios[video_audio_connect[mxb->cur_input]], sizeof(struct v4l2_audio)); 902 memcpy(a, &mxb_audios[video_audio_connect[mxb->cur_input]], sizeof(struct v4l2_audio));
903 903
904 return 0; 904 return 0;
905 } 905 }
906 case VIDIOC_S_AUDIO: 906 case VIDIOC_S_AUDIO:
@@ -908,7 +908,7 @@ static int mxb_ioctl(struct saa7146_fh *fh, unsigned int cmd, void *arg)
908 struct v4l2_audio *a = arg; 908 struct v4l2_audio *a = arg;
909 DEB_D(("VIDIOC_S_AUDIO %d.\n",a->index)); 909 DEB_D(("VIDIOC_S_AUDIO %d.\n",a->index));
910 return 0; 910 return 0;
911 } 911 }
912 default: 912 default:
913/* 913/*
914 DEB2(printk("does not handle this ioctl.\n")); 914 DEB2(printk("does not handle this ioctl.\n"));
@@ -928,7 +928,7 @@ static int std_callback(struct saa7146_dev* dev, struct saa7146_standard *std)
928 v4l2_std_id std = V4L2_STD_PAL_I; 928 v4l2_std_id std = V4L2_STD_PAL_I;
929 DEB_D(("VIDIOC_S_STD: setting mxb for PAL_I.\n")); 929 DEB_D(("VIDIOC_S_STD: setting mxb for PAL_I.\n"));
930 /* set the 7146 gpio register -- I don't know what this does exactly */ 930 /* set the 7146 gpio register -- I don't know what this does exactly */
931 saa7146_write(dev, GPIO_CTRL, 0x00404050); 931 saa7146_write(dev, GPIO_CTRL, 0x00404050);
932 /* unset the 7111 gpio register -- I don't know what this does exactly */ 932 /* unset the 7111 gpio register -- I don't know what this does exactly */
933 mxb->saa7111a->driver->command(mxb->saa7111a,DECODER_SET_GPIO, &zero); 933 mxb->saa7111a->driver->command(mxb->saa7111a,DECODER_SET_GPIO, &zero);
934 mxb->tuner->driver->command(mxb->tuner, VIDIOC_S_STD, &std); 934 mxb->tuner->driver->command(mxb->tuner, VIDIOC_S_STD, &std);
@@ -936,7 +936,7 @@ static int std_callback(struct saa7146_dev* dev, struct saa7146_standard *std)
936 v4l2_std_id std = V4L2_STD_PAL_BG; 936 v4l2_std_id std = V4L2_STD_PAL_BG;
937 DEB_D(("VIDIOC_S_STD: setting mxb for PAL/NTSC/SECAM.\n")); 937 DEB_D(("VIDIOC_S_STD: setting mxb for PAL/NTSC/SECAM.\n"));
938 /* set the 7146 gpio register -- I don't know what this does exactly */ 938 /* set the 7146 gpio register -- I don't know what this does exactly */
939 saa7146_write(dev, GPIO_CTRL, 0x00404050); 939 saa7146_write(dev, GPIO_CTRL, 0x00404050);
940 /* set the 7111 gpio register -- I don't know what this does exactly */ 940 /* set the 7111 gpio register -- I don't know what this does exactly */
941 mxb->saa7111a->driver->command(mxb->saa7111a,DECODER_SET_GPIO, &one); 941 mxb->saa7111a->driver->command(mxb->saa7111a,DECODER_SET_GPIO, &one);
942 mxb->tuner->driver->command(mxb->tuner, VIDIOC_S_STD, &std); 942 mxb->tuner->driver->command(mxb->tuner, VIDIOC_S_STD, &std);
@@ -969,8 +969,8 @@ static struct saa7146_standard standard[] = {
969}; 969};
970 970
971static struct saa7146_pci_extension_data mxb = { 971static struct saa7146_pci_extension_data mxb = {
972 .ext_priv = "Multimedia eXtension Board", 972 .ext_priv = "Multimedia eXtension Board",
973 .ext = &extension, 973 .ext = &extension,
974}; 974};
975 975
976static struct pci_device_id pci_tbl[] = { 976static struct pci_device_id pci_tbl[] = {
@@ -992,7 +992,7 @@ static struct saa7146_ext_vv vv_data = {
992 .capabilities = V4L2_CAP_TUNER | V4L2_CAP_VBI_CAPTURE, 992 .capabilities = V4L2_CAP_TUNER | V4L2_CAP_VBI_CAPTURE,
993 .stds = &standard[0], 993 .stds = &standard[0],
994 .num_stds = sizeof(standard)/sizeof(struct saa7146_standard), 994 .num_stds = sizeof(standard)/sizeof(struct saa7146_standard),
995 .std_callback = &std_callback, 995 .std_callback = &std_callback,
996 .ioctls = &ioctls[0], 996 .ioctls = &ioctls[0],
997 .ioctl = mxb_ioctl, 997 .ioctl = mxb_ioctl,
998}; 998};
@@ -1000,7 +1000,7 @@ static struct saa7146_ext_vv vv_data = {
1000static struct saa7146_extension extension = { 1000static struct saa7146_extension extension = {
1001 .name = MXB_IDENTIFIER, 1001 .name = MXB_IDENTIFIER,
1002 .flags = SAA7146_USE_I2C_IRQ, 1002 .flags = SAA7146_USE_I2C_IRQ,
1003 1003
1004 .pci_tbl = &pci_tbl[0], 1004 .pci_tbl = &pci_tbl[0],
1005 .module = THIS_MODULE, 1005 .module = THIS_MODULE,
1006 1006
@@ -1010,7 +1010,7 @@ static struct saa7146_extension extension = {
1010 1010
1011 .irq_mask = 0, 1011 .irq_mask = 0,
1012 .irq_func = NULL, 1012 .irq_func = NULL,
1013}; 1013};
1014 1014
1015static int __init mxb_init_module(void) 1015static int __init mxb_init_module(void)
1016{ 1016{
@@ -1018,7 +1018,7 @@ static int __init mxb_init_module(void)
1018 DEB_S(("failed to register extension.\n")); 1018 DEB_S(("failed to register extension.\n"));
1019 return -ENODEV; 1019 return -ENODEV;
1020 } 1020 }
1021 1021
1022 return 0; 1022 return 0;
1023} 1023}
1024 1024
diff --git a/drivers/media/video/mxb.h b/drivers/media/video/mxb.h
index 2332ed5f7c6b..400a57ba62ec 100644
--- a/drivers/media/video/mxb.h
+++ b/drivers/media/video/mxb.h
@@ -38,5 +38,5 @@ static struct v4l2_audio mxb_audios[MXB_AUDIOS] = {
38 .name = "CD-ROM (X10)", 38 .name = "CD-ROM (X10)",
39 .capability = V4L2_AUDCAP_STEREO, 39 .capability = V4L2_AUDCAP_STEREO,
40 } 40 }
41}; 41};
42#endif 42#endif
diff --git a/drivers/media/video/planb.c b/drivers/media/video/planb.c
index f3fc361bec97..15fd85acabda 100644
--- a/drivers/media/video/planb.c
+++ b/drivers/media/video/planb.c
@@ -48,7 +48,7 @@
48#include <asm/pgtable.h> 48#include <asm/pgtable.h>
49#include <asm/page.h> 49#include <asm/page.h>
50#include <asm/irq.h> 50#include <asm/irq.h>
51#include <asm/semaphore.h> 51#include <linux/mutex.h>
52 52
53#include "planb.h" 53#include "planb.h"
54#include "saa7196.h" 54#include "saa7196.h"
@@ -329,12 +329,12 @@ static volatile struct dbdma_cmd *cmd_geo_setup(
329 329
330static inline void planb_lock(struct planb *pb) 330static inline void planb_lock(struct planb *pb)
331{ 331{
332 down(&pb->lock); 332 mutex_lock(&pb->lock);
333} 333}
334 334
335static inline void planb_unlock(struct planb *pb) 335static inline void planb_unlock(struct planb *pb)
336{ 336{
337 up(&pb->lock); 337 mutex_unlock(&pb->lock);
338} 338}
339 339
340/***************/ 340/***************/
@@ -2067,7 +2067,7 @@ static int init_planb(struct planb *pb)
2067#endif 2067#endif
2068 pb->tab_size = PLANB_MAXLINES + 40; 2068 pb->tab_size = PLANB_MAXLINES + 40;
2069 pb->suspend = 0; 2069 pb->suspend = 0;
2070 init_MUTEX(&pb->lock); 2070 mutex_init(&pb->lock);
2071 pb->ch1_cmd = 0; 2071 pb->ch1_cmd = 0;
2072 pb->ch2_cmd = 0; 2072 pb->ch2_cmd = 0;
2073 pb->mask = 0; 2073 pb->mask = 0;
diff --git a/drivers/media/video/planb.h b/drivers/media/video/planb.h
index 8a0faad16118..79b6b561426e 100644
--- a/drivers/media/video/planb.h
+++ b/drivers/media/video/planb.h
@@ -174,7 +174,7 @@ struct planb {
174 int user; 174 int user;
175 unsigned int tab_size; 175 unsigned int tab_size;
176 int maxlines; 176 int maxlines;
177 struct semaphore lock; 177 struct mutex lock;
178 unsigned int irq; /* interrupt number */ 178 unsigned int irq; /* interrupt number */
179 volatile unsigned int intr_mask; 179 volatile unsigned int intr_mask;
180 180
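The planb hunks above, and the pms, saa5246a and saa5249 hunks that follow, all apply the same mechanical conversion from the old semaphore API to the mutex API. A minimal sketch of the pattern, using an illustrative structure and function names that are not taken from these drivers:

    #include <linux/mutex.h>

    struct example_dev {            /* hypothetical device state */
            struct mutex lock;      /* was: struct semaphore lock; */
    };

    static void example_dev_init(struct example_dev *d)
    {
            mutex_init(&d->lock);   /* was: init_MUTEX(&d->lock); */
    }

    static void example_dev_work(struct example_dev *d)
    {
            mutex_lock(&d->lock);   /* was: down(&d->lock); */
            /* ... short critical section ... */
            mutex_unlock(&d->lock); /* was: up(&d->lock); */
    }
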
diff --git a/drivers/media/video/pms.c b/drivers/media/video/pms.c
index 9e6448639480..05ca55939e77 100644
--- a/drivers/media/video/pms.c
+++ b/drivers/media/video/pms.c
@@ -30,6 +30,8 @@
30#include <asm/io.h> 30#include <asm/io.h>
31#include <linux/sched.h> 31#include <linux/sched.h>
32#include <linux/videodev.h> 32#include <linux/videodev.h>
33#include <linux/mutex.h>
34
33#include <asm/uaccess.h> 35#include <asm/uaccess.h>
34 36
35 37
@@ -44,7 +46,7 @@ struct pms_device
44 struct video_picture picture; 46 struct video_picture picture;
45 int height; 47 int height;
46 int width; 48 int width;
47 struct semaphore lock; 49 struct mutex lock;
48}; 50};
49 51
50struct i2c_info 52struct i2c_info
@@ -724,10 +726,10 @@ static int pms_do_ioctl(struct inode *inode, struct file *file,
724 struct video_channel *v = arg; 726 struct video_channel *v = arg;
725 if(v->channel<0 || v->channel>3) 727 if(v->channel<0 || v->channel>3)
726 return -EINVAL; 728 return -EINVAL;
727 down(&pd->lock); 729 mutex_lock(&pd->lock);
728 pms_videosource(v->channel&1); 730 pms_videosource(v->channel&1);
729 pms_vcrinput(v->channel>>1); 731 pms_vcrinput(v->channel>>1);
730 up(&pd->lock); 732 mutex_unlock(&pd->lock);
731 return 0; 733 return 0;
732 } 734 }
733 case VIDIOCGTUNER: 735 case VIDIOCGTUNER:
@@ -761,7 +763,7 @@ static int pms_do_ioctl(struct inode *inode, struct file *file,
761 struct video_tuner *v = arg; 763 struct video_tuner *v = arg;
762 if(v->tuner) 764 if(v->tuner)
763 return -EINVAL; 765 return -EINVAL;
764 down(&pd->lock); 766 mutex_lock(&pd->lock);
765 switch(v->mode) 767 switch(v->mode)
766 { 768 {
767 case VIDEO_MODE_AUTO: 769 case VIDEO_MODE_AUTO:
@@ -785,10 +787,10 @@ static int pms_do_ioctl(struct inode *inode, struct file *file,
785 pms_format(2); 787 pms_format(2);
786 break; 788 break;
787 default: 789 default:
788 up(&pd->lock); 790 mutex_unlock(&pd->lock);
789 return -EINVAL; 791 return -EINVAL;
790 } 792 }
791 up(&pd->lock); 793 mutex_unlock(&pd->lock);
792 return 0; 794 return 0;
793 } 795 }
794 case VIDIOCGPICT: 796 case VIDIOCGPICT:
@@ -809,12 +811,12 @@ static int pms_do_ioctl(struct inode *inode, struct file *file,
809 * Now load the card. 811 * Now load the card.
810 */ 812 */
811 813
812 down(&pd->lock); 814 mutex_lock(&pd->lock);
813 pms_brightness(p->brightness>>8); 815 pms_brightness(p->brightness>>8);
814 pms_hue(p->hue>>8); 816 pms_hue(p->hue>>8);
815 pms_colour(p->colour>>8); 817 pms_colour(p->colour>>8);
816 pms_contrast(p->contrast>>8); 818 pms_contrast(p->contrast>>8);
817 up(&pd->lock); 819 mutex_unlock(&pd->lock);
818 return 0; 820 return 0;
819 } 821 }
820 case VIDIOCSWIN: 822 case VIDIOCSWIN:
@@ -830,9 +832,9 @@ static int pms_do_ioctl(struct inode *inode, struct file *file,
830 return -EINVAL; 832 return -EINVAL;
831 pd->width=vw->width; 833 pd->width=vw->width;
832 pd->height=vw->height; 834 pd->height=vw->height;
833 down(&pd->lock); 835 mutex_lock(&pd->lock);
834 pms_resolution(pd->width, pd->height); 836 pms_resolution(pd->width, pd->height);
835 up(&pd->lock); /* Ok we figured out what to use from our wide choice */ 837 mutex_unlock(&pd->lock); /* Ok we figured out what to use from our wide choice */
836 return 0; 838 return 0;
837 } 839 }
838 case VIDIOCGWIN: 840 case VIDIOCGWIN:
@@ -872,9 +874,9 @@ static ssize_t pms_read(struct file *file, char __user *buf,
872 struct pms_device *pd=(struct pms_device *)v; 874 struct pms_device *pd=(struct pms_device *)v;
873 int len; 875 int len;
874 876
875 down(&pd->lock); 877 mutex_lock(&pd->lock);
876 len=pms_capture(pd, buf, (pd->picture.depth==16)?0:1,count); 878 len=pms_capture(pd, buf, (pd->picture.depth==16)?0:1,count);
877 up(&pd->lock); 879 mutex_unlock(&pd->lock);
878 return len; 880 return len;
879} 881}
880 882
@@ -1029,7 +1031,7 @@ static int __init init_pms_cards(void)
1029 return -ENODEV; 1031 return -ENODEV;
1030 } 1032 }
1031 memcpy(&pms_device, &pms_template, sizeof(pms_template)); 1033 memcpy(&pms_device, &pms_template, sizeof(pms_template));
1032 init_MUTEX(&pms_device.lock); 1034 mutex_init(&pms_device.lock);
1033 pms_device.height=240; 1035 pms_device.height=240;
1034 pms_device.width=320; 1036 pms_device.width=320;
1035 pms_swsense(75); 1037 pms_swsense(75);
diff --git a/drivers/media/video/saa5246a.c b/drivers/media/video/saa5246a.c
index 2ce010201308..dd830e0e5e96 100644
--- a/drivers/media/video/saa5246a.c
+++ b/drivers/media/video/saa5246a.c
@@ -46,6 +46,8 @@
46#include <linux/i2c.h> 46#include <linux/i2c.h>
47#include <linux/videotext.h> 47#include <linux/videotext.h>
48#include <linux/videodev.h> 48#include <linux/videodev.h>
49#include <linux/mutex.h>
50
49#include "saa5246a.h" 51#include "saa5246a.h"
50 52
51MODULE_AUTHOR("Michael Geng <linux@MichaelGeng.de>"); 53MODULE_AUTHOR("Michael Geng <linux@MichaelGeng.de>");
@@ -57,7 +59,7 @@ struct saa5246a_device
57 u8 pgbuf[NUM_DAUS][VTX_VIRTUALSIZE]; 59 u8 pgbuf[NUM_DAUS][VTX_VIRTUALSIZE];
58 int is_searching[NUM_DAUS]; 60 int is_searching[NUM_DAUS];
59 struct i2c_client *client; 61 struct i2c_client *client;
60 struct semaphore lock; 62 struct mutex lock;
61}; 63};
62 64
63static struct video_device saa_template; /* Declared near bottom */ 65static struct video_device saa_template; /* Declared near bottom */
@@ -90,7 +92,7 @@ static int saa5246a_attach(struct i2c_adapter *adap, int addr, int kind)
90 return -ENOMEM; 92 return -ENOMEM;
91 } 93 }
92 strlcpy(client->name, IF_NAME, I2C_NAME_SIZE); 94 strlcpy(client->name, IF_NAME, I2C_NAME_SIZE);
93 init_MUTEX(&t->lock); 95 mutex_init(&t->lock);
94 96
95 /* 97 /*
96 * Now create a video4linux device 98 * Now create a video4linux device
@@ -719,9 +721,9 @@ static int saa5246a_ioctl(struct inode *inode, struct file *file,
719 int err; 721 int err;
720 722
721 cmd = vtx_fix_command(cmd); 723 cmd = vtx_fix_command(cmd);
722 down(&t->lock); 724 mutex_lock(&t->lock);
723 err = video_usercopy(inode, file, cmd, arg, do_saa5246a_ioctl); 725 err = video_usercopy(inode, file, cmd, arg, do_saa5246a_ioctl);
724 up(&t->lock); 726 mutex_unlock(&t->lock);
725 return err; 727 return err;
726} 728}
727 729
diff --git a/drivers/media/video/saa5249.c b/drivers/media/video/saa5249.c
index 5694eb58c3a1..a9f3cf0b1e3c 100644
--- a/drivers/media/video/saa5249.c
+++ b/drivers/media/video/saa5249.c
@@ -56,6 +56,8 @@
56#include <linux/i2c.h> 56#include <linux/i2c.h>
57#include <linux/videotext.h> 57#include <linux/videotext.h>
58#include <linux/videodev.h> 58#include <linux/videodev.h>
59#include <linux/mutex.h>
60
59 61
60#include <asm/io.h> 62#include <asm/io.h>
61#include <asm/uaccess.h> 63#include <asm/uaccess.h>
@@ -105,7 +107,7 @@ struct saa5249_device
105 int disp_mode; 107 int disp_mode;
106 int virtual_mode; 108 int virtual_mode;
107 struct i2c_client *client; 109 struct i2c_client *client;
108 struct semaphore lock; 110 struct mutex lock;
109}; 111};
110 112
111 113
@@ -158,7 +160,7 @@ static int saa5249_attach(struct i2c_adapter *adap, int addr, int kind)
158 return -ENOMEM; 160 return -ENOMEM;
159 } 161 }
160 strlcpy(client->name, IF_NAME, I2C_NAME_SIZE); 162 strlcpy(client->name, IF_NAME, I2C_NAME_SIZE);
161 init_MUTEX(&t->lock); 163 mutex_init(&t->lock);
162 164
163 /* 165 /*
164 * Now create a video4linux device 166 * Now create a video4linux device
@@ -619,9 +621,9 @@ static int saa5249_ioctl(struct inode *inode, struct file *file,
619 int err; 621 int err;
620 622
621 cmd = vtx_fix_command(cmd); 623 cmd = vtx_fix_command(cmd);
622 down(&t->lock); 624 mutex_lock(&t->lock);
623 err = video_usercopy(inode,file,cmd,arg,do_saa5249_ioctl); 625 err = video_usercopy(inode,file,cmd,arg,do_saa5249_ioctl);
624 up(&t->lock); 626 mutex_unlock(&t->lock);
625 return err; 627 return err;
626} 628}
627 629
diff --git a/drivers/media/video/saa7115.c b/drivers/media/video/saa7115.c
index ffd87ce55556..b184fd00b4e7 100644
--- a/drivers/media/video/saa7115.c
+++ b/drivers/media/video/saa7115.c
@@ -1,4 +1,4 @@
1/* saa7115 - Philips SAA7114/SAA7115 video decoder driver 1/* saa7115 - Philips SAA7113/SAA7114/SAA7115 video decoder driver
2 * 2 *
3 * Based on saa7114 driver by Maxim Yevtyushkin, which is based on 3 * Based on saa7114 driver by Maxim Yevtyushkin, which is based on
4 * the saa7111 driver by Dave Perks. 4 * the saa7111 driver by Dave Perks.
@@ -16,6 +16,7 @@
16 * (2/17/2003) 16 * (2/17/2003)
17 * 17 *
18 * VBI support (2004) and cleanups (2005) by Hans Verkuil <hverkuil@xs4all.nl> 18 * VBI support (2004) and cleanups (2005) by Hans Verkuil <hverkuil@xs4all.nl>
19 * SAA7113 support by Mauro Carvalho Chehab <mchehab@infradead.org>
19 * 20 *
20 * This program is free software; you can redistribute it and/or 21 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License 22 * modify it under the terms of the GNU General Public License
@@ -42,8 +43,9 @@
42#include <media/audiochip.h> 43#include <media/audiochip.h>
43#include <asm/div64.h> 44#include <asm/div64.h>
44 45
45MODULE_DESCRIPTION("Philips SAA7114/SAA7115 video decoder driver"); 46MODULE_DESCRIPTION("Philips SAA7113/SAA7114/SAA7115 video decoder driver");
46MODULE_AUTHOR("Maxim Yevtyushkin, Kevin Thayer, Chris Kennedy, Hans Verkuil"); 47MODULE_AUTHOR( "Maxim Yevtyushkin, Kevin Thayer, Chris Kennedy, "
48 "Hans Verkuil, Mauro Carvalho Chehab");
47MODULE_LICENSE("GPL"); 49MODULE_LICENSE("GPL");
48 50
49static int debug = 0; 51static int debug = 0;
@@ -51,7 +53,10 @@ module_param(debug, bool, 0644);
51 53
52MODULE_PARM_DESC(debug, "Debug level (0-1)"); 54MODULE_PARM_DESC(debug, "Debug level (0-1)");
53 55
54static unsigned short normal_i2c[] = { 0x42 >> 1, 0x40 >> 1, I2C_CLIENT_END }; 56static unsigned short normal_i2c[] = {
57 0x4a >>1, 0x48 >>1, /* SAA7113 */
58 0x42 >> 1, 0x40 >> 1, /* SAA7114 and SAA7115 */
59 I2C_CLIENT_END };
55 60
56 61
57I2C_CLIENT_INSMOD; 62I2C_CLIENT_INSMOD;
@@ -101,10 +106,12 @@ static inline int saa7115_read(struct i2c_client *client, u8 reg)
101 Hauppauge driver sets. */ 106 Hauppauge driver sets. */
102 107
103static const unsigned char saa7115_init_auto_input[] = { 108static const unsigned char saa7115_init_auto_input[] = {
109 /* Front-End Part */
104 0x01, 0x48, /* white peak control disabled */ 110 0x01, 0x48, /* white peak control disabled */
105 0x03, 0x20, /* was 0x30. 0x20: long vertical blanking */ 111 0x03, 0x20, /* was 0x30. 0x20: long vertical blanking */
106 0x04, 0x90, /* analog gain set to 0 */ 112 0x04, 0x90, /* analog gain set to 0 */
107 0x05, 0x90, /* analog gain set to 0 */ 113 0x05, 0x90, /* analog gain set to 0 */
114 /* Decoder Part */
108 0x06, 0xeb, /* horiz sync begin = -21 */ 115 0x06, 0xeb, /* horiz sync begin = -21 */
109 0x07, 0xe0, /* horiz sync stop = -17 */ 116 0x07, 0xe0, /* horiz sync stop = -17 */
110 0x0a, 0x80, /* was 0x88. decoder brightness, 0x80 is itu standard */ 117 0x0a, 0x80, /* was 0x88. decoder brightness, 0x80 is itu standard */
@@ -123,6 +130,8 @@ static const unsigned char saa7115_init_auto_input[] = {
123 0x1b, 0x42, /* misc chroma control 0x42 = recommended */ 130 0x1b, 0x42, /* misc chroma control 0x42 = recommended */
124 0x1c, 0xa9, /* combfilter control 0xA9 = recommended */ 131 0x1c, 0xa9, /* combfilter control 0xA9 = recommended */
125 0x1d, 0x01, /* combfilter control 0x01 = recommended */ 132 0x1d, 0x01, /* combfilter control 0x01 = recommended */
133
134 /* Power Device Control */
126 0x88, 0xd0, /* reset device */ 135 0x88, 0xd0, /* reset device */
127 0x88, 0xf0, /* set device programmed, all in operational mode */ 136 0x88, 0xf0, /* set device programmed, all in operational mode */
128 0x00, 0x00 137 0x00, 0x00
@@ -338,6 +347,33 @@ static const unsigned char saa7115_cfg_vbi_off[] = {
338 0x00, 0x00 347 0x00, 0x00
339}; 348};
340 349
350static const unsigned char saa7113_init_auto_input[] = {
 351 0x01, 0x08, /* PH7113_INCREMENT_DELAY - (1) (1) (1) (1) IDEL3 IDEL2 IDEL1 IDEL0 */
352 0x02, 0xc2, /* PH7113_ANALOG_INPUT_CONTR_1 - FUSE1 FUSE0 GUDL1 GUDL0 MODE3 MODE2 MODE1 MODE0 */
353 0x03, 0x30, /* PH7113_ANALOG_INPUT_CONTR_2 - (1) HLNRS VBSL WPOFF HOLDG GAFIX GAI28 GAI18 */
354 0x04, 0x00, /* PH7113_ANALOG_INPUT_CONTR_3 - GAI17 GAI16 GAI15 GAI14 GAI13 GAI12 GAI11 GAI10 */
355 0x05, 0x00, /* PH7113_ANALOG_INPUT_CONTR_4 - GAI27 GAI26 GAI25 GAI24 GAI23 GAI22 GAI21 GAI20 */
356 0x06, 0x89, /* PH7113_HORIZONTAL_SYNC_START - HSB7 HSB6 HSB5 HSB4 HSB3 HSB2 HSB1 HSB0 */
357 0x07, 0x0d, /* PH7113_HORIZONTAL_SYNC_STOP - HSS7 HSS6 HSS5 HSS4 HSS3 HSS2 HSS1 HSS0 */
358 0x08, 0x88, /* PH7113_SYNC_CONTROL - AUFD FSEL FOET HTC1 HTC0 HPLL VNOI1 VNOI0 */
359 0x09, 0x01, /* PH7113_LUMINANCE_CONTROL - BYPS PREF BPSS1 BPSS0 VBLB UPTCV APER1 APER0 */
360 0x0a, 0x80, /* PH7113_LUMINANCE_BRIGHTNESS - BRIG7 BRIG6 BRIG5 BRIG4 BRIG3 BRIG2 BRIG1 BRIG0 */
361 0x0b, 0x47, /* PH7113_LUMINANCE_CONTRAST - CONT7 CONT6 CONT5 CONT4 CONT3 CONT2 CONT1 CONT0 */
362 0x0c, 0x40, /* PH7113_CHROMA_SATURATION - SATN7 SATN6 SATN5 SATN4 SATN3 SATN2 SATN1 SATN0 */
363 0x0d, 0x00, /* PH7113_CHROMA_HUE_CONTROL - HUEC7 HUEC6 HUEC5 HUEC4 HUEC3 HUEC2 HUEC1 HUEC0 */
364 0x0e, 0x01, /* PH7113_CHROMA_CONTROL - CDTO CSTD2 CSTD1 CSTD0 DCCF FCTC CHBW1 CHBW0 */
365 0x0f, 0x2a, /* PH7113_CHROMA_GAIN_CONTROL - ACGC CGAIN6 CGAIN5 CGAIN4 CGAIN3 CGAIN2 CGAIN1 CGAIN0 */
366 0x10, 0x08, /* PH7113_FORMAT_DELAY_CONTROL - OFTS1 OFTS0 HDEL1 HDEL0 VRLN YDEL2 YDEL1 YDEL0 */
367 0x11, 0x0c, /* PH7113_OUTPUT_CONTROL_1 - GPSW1 CM99 GPSW0 HLSEL OEYC OERT VIPB COLO */
368 0x12, 0x07, /* PH7113_OUTPUT_CONTROL_2 - RTSE13 RTSE12 RTSE11 RTSE10 RTSE03 RTSE02 RTSE01 RTSE00 */
369 0x13, 0x00, /* PH7113_OUTPUT_CONTROL_3 - ADLSB (1) (1) OLDSB FIDP (1) AOSL1 AOSL0 */
370 0x14, 0x00, /* RESERVED 14 - (1) (1) (1) (1) (1) (1) (1) (1) */
371 0x15, 0x00, /* PH7113_V_GATE1_START - VSTA7 VSTA6 VSTA5 VSTA4 VSTA3 VSTA2 VSTA1 VSTA0 */
372 0x16, 0x00, /* PH7113_V_GATE1_STOP - VSTO7 VSTO6 VSTO5 VSTO4 VSTO3 VSTO2 VSTO1 VSTO0 */
373 0x17, 0x00, /* PH7113_V_GATE1_MSB - (1) (1) (1) (1) (1) (1) VSTO8 VSTA8 */
374 0x00, 0x00
375};
376
341static const unsigned char saa7115_init_misc[] = { 377static const unsigned char saa7115_init_misc[] = {
342 0x38, 0x03, /* audio stuff */ 378 0x38, 0x03, /* audio stuff */
343 0x39, 0x10, 379 0x39, 0x10,
@@ -677,10 +713,35 @@ static void saa7115_set_v4lstd(struct i2c_client *client, v4l2_std_id std)
677 saa7115_writeregs(client, saa7115_cfg_50hz_video); 713 saa7115_writeregs(client, saa7115_cfg_50hz_video);
678 } 714 }
679 715
716 /* Register 0E - Bits D6-D4 on NO-AUTO mode
717 (SAA7113 doesn't have auto mode)
718 50 Hz / 625 lines 60 Hz / 525 lines
 719 000 PAL BGDHI (4.43MHz) NTSC M (3.58MHz)
720 001 NTSC 4.43 (50 Hz) PAL 4.43 (60 Hz)
721 010 Combination-PAL N (3.58MHz) NTSC 4.43 (60 Hz)
722 011 NTSC N (3.58MHz) PAL M (3.58MHz)
723 100 reserved NTSC-Japan (3.58MHz)
724 */
725 if (state->ident == V4L2_IDENT_SAA7113) {
726 u8 reg = saa7115_read(client, 0x0e) & 0x8f;
727
728 if (std == V4L2_STD_PAL_M) {
729 reg|=0x30;
730 } else if (std == V4L2_STD_PAL_N) {
731 reg|=0x20;
732 } else if (std == V4L2_STD_PAL_60) {
733 reg|=0x10;
734 } else if (std == V4L2_STD_NTSC_M_JP) {
735 reg|=0x40;
736 }
737 saa7115_write(client, 0x0e, reg);
738 }
739
740
680 state->std = std; 741 state->std = std;
681 742
682 /* restart task B if needed */ 743 /* restart task B if needed */
683 if (taskb && state->ident == V4L2_IDENT_SAA7114) { 744 if (taskb && state->ident != V4L2_IDENT_SAA7115) {
684 saa7115_writeregs(client, saa7115_cfg_vbi_on); 745 saa7115_writeregs(client, saa7115_cfg_vbi_on);
685 } 746 }
686 747
@@ -703,7 +764,7 @@ static void saa7115_log_status(struct i2c_client *client)
703 int vcr; 764 int vcr;
704 765
705 v4l_info(client, "Audio frequency: %d Hz\n", state->audclk_freq); 766 v4l_info(client, "Audio frequency: %d Hz\n", state->audclk_freq);
706 if (client->name[6] == '4') { 767 if (state->ident != V4L2_IDENT_SAA7115) {
707 /* status for the saa7114 */ 768 /* status for the saa7114 */
708 reg1f = saa7115_read(client, 0x1f); 769 reg1f = saa7115_read(client, 0x1f);
709 signalOk = (reg1f & 0xc1) == 0x81; 770 signalOk = (reg1f & 0xc1) == 0x81;
@@ -751,8 +812,8 @@ static void saa7115_set_lcr(struct i2c_client *client, struct v4l2_sliced_vbi_fo
751 u8 lcr[24]; 812 u8 lcr[24];
752 int i, x; 813 int i, x;
753 814
 754 /* saa7114 doesn't yet support VBI */ 815 /* saa7113/7114 don't yet support VBI */
755 if (state->ident == V4L2_IDENT_SAA7114) 816 if (state->ident != V4L2_IDENT_SAA7115)
756 return; 817 return;
757 818
758 for (i = 0; i <= 23; i++) 819 for (i = 0; i <= 23; i++)
@@ -791,7 +852,7 @@ static void saa7115_set_lcr(struct i2c_client *client, struct v4l2_sliced_vbi_fo
791 case 0: 852 case 0:
792 lcr[i] |= 0xf << (4 * x); 853 lcr[i] |= 0xf << (4 * x);
793 break; 854 break;
794 case V4L2_SLICED_TELETEXT_B: 855 case V4L2_SLICED_TELETEXT_PAL_B:
795 lcr[i] |= 1 << (4 * x); 856 lcr[i] |= 1 << (4 * x);
796 break; 857 break;
797 case V4L2_SLICED_CAPTION_525: 858 case V4L2_SLICED_CAPTION_525:
@@ -820,7 +881,7 @@ static void saa7115_set_lcr(struct i2c_client *client, struct v4l2_sliced_vbi_fo
820static int saa7115_get_v4lfmt(struct i2c_client *client, struct v4l2_format *fmt) 881static int saa7115_get_v4lfmt(struct i2c_client *client, struct v4l2_format *fmt)
821{ 882{
822 static u16 lcr2vbi[] = { 883 static u16 lcr2vbi[] = {
823 0, V4L2_SLICED_TELETEXT_B, 0, /* 1 */ 884 0, V4L2_SLICED_TELETEXT_PAL_B, 0, /* 1 */
824 0, V4L2_SLICED_CAPTION_525, /* 4 */ 885 0, V4L2_SLICED_CAPTION_525, /* 4 */
825 V4L2_SLICED_WSS_625, 0, /* 5 */ 886 V4L2_SLICED_WSS_625, 0, /* 5 */
826 V4L2_SLICED_VPS, 0, 0, 0, 0, /* 7 */ 887 V4L2_SLICED_VPS, 0, 0, 0, 0, /* 7 */
@@ -985,7 +1046,7 @@ static void saa7115_decode_vbi_line(struct i2c_client *client,
985 /* decode payloads */ 1046 /* decode payloads */
986 switch (id2) { 1047 switch (id2) {
987 case 1: 1048 case 1:
988 vbi->type = V4L2_SLICED_TELETEXT_B; 1049 vbi->type = V4L2_SLICED_TELETEXT_PAL_B;
989 break; 1050 break;
990 case 4: 1051 case 4:
991 if (!saa7115_odd_parity(p[0]) || !saa7115_odd_parity(p[1])) 1052 if (!saa7115_odd_parity(p[0]) || !saa7115_odd_parity(p[1]))
@@ -1261,14 +1322,12 @@ static int saa7115_attach(struct i2c_adapter *adapter, int address, int kind)
1261 1322
1262 saa7115_write(client, 0, 5); 1323 saa7115_write(client, 0, 5);
1263 chip_id = saa7115_read(client, 0) & 0x0f; 1324 chip_id = saa7115_read(client, 0) & 0x0f;
 1264 if (chip_id != 4 && chip_id != 5) { 1325 if (chip_id < 3 || chip_id > 5) {
1265 v4l_dbg(1, debug, client, "saa7115 not found\n"); 1326 v4l_dbg(1, debug, client, "saa7115 not found\n");
1266 kfree(client); 1327 kfree(client);
1267 return 0; 1328 return 0;
1268 } 1329 }
1269 if (chip_id == 4) { 1330 snprintf(client->name, sizeof(client->name) - 1, "saa711%d",chip_id);
1270 snprintf(client->name, sizeof(client->name) - 1, "saa7114");
1271 }
1272 v4l_info(client, "saa711%d found @ 0x%x (%s)\n", chip_id, address << 1, adapter->name); 1331 v4l_info(client, "saa711%d found @ 0x%x (%s)\n", chip_id, address << 1, adapter->name);
1273 1332
1274 state = kzalloc(sizeof(struct saa7115_state), GFP_KERNEL); 1333 state = kzalloc(sizeof(struct saa7115_state), GFP_KERNEL);
@@ -1285,13 +1344,27 @@ static int saa7115_attach(struct i2c_adapter *adapter, int address, int kind)
1285 state->contrast = 64; 1344 state->contrast = 64;
1286 state->hue = 0; 1345 state->hue = 0;
1287 state->sat = 64; 1346 state->sat = 64;
1288 state->ident = (chip_id == 4) ? V4L2_IDENT_SAA7114 : V4L2_IDENT_SAA7115; 1347 switch (chip_id) {
1348 case 3:
1349 state->ident = V4L2_IDENT_SAA7113;
1350 break;
1351 case 4:
1352 state->ident = V4L2_IDENT_SAA7114;
1353 break;
1354 default:
1355 state->ident = V4L2_IDENT_SAA7115;
1356 break;
1357 }
1358
1289 state->audclk_freq = 48000; 1359 state->audclk_freq = 48000;
1290 1360
1291 v4l_dbg(1, debug, client, "writing init values\n"); 1361 v4l_dbg(1, debug, client, "writing init values\n");
1292 1362
1293 /* init to 60hz/48khz */ 1363 /* init to 60hz/48khz */
1294 saa7115_writeregs(client, saa7115_init_auto_input); 1364 if (state->ident==V4L2_IDENT_SAA7113)
1365 saa7115_writeregs(client, saa7113_init_auto_input);
1366 else
1367 saa7115_writeregs(client, saa7115_init_auto_input);
1295 saa7115_writeregs(client, saa7115_init_misc); 1368 saa7115_writeregs(client, saa7115_init_misc);
1296 saa7115_writeregs(client, saa7115_cfg_60hz_fullres_x); 1369 saa7115_writeregs(client, saa7115_cfg_60hz_fullres_x);
1297 saa7115_writeregs(client, saa7115_cfg_60hz_fullres_y); 1370 saa7115_writeregs(client, saa7115_cfg_60hz_fullres_y);
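The SAA7113 has no automatic standard detection, so the hunk above programs bits D6-D4 of register 0x0E explicitly, following the table in the comment. A sketch of that mapping as a standalone helper (the function name is made up for illustration; the driver open-codes this in saa7115_set_v4lstd()):

    /* Return the D6-D4 value to OR into register 0x0E after it has been
     * masked with 0x8f; standards not listed keep the auto/default 000. */
    static u8 saa7113_std_bits(v4l2_std_id std)
    {
            if (std == V4L2_STD_PAL_M)
                    return 0x30;
            if (std == V4L2_STD_PAL_N)
                    return 0x20;
            if (std == V4L2_STD_PAL_60)
                    return 0x10;
            if (std == V4L2_STD_NTSC_M_JP)
                    return 0x40;
            return 0x00;
    }
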
diff --git a/drivers/media/video/saa7134/saa7134-alsa.c b/drivers/media/video/saa7134/saa7134-alsa.c
index 7df5e0826e12..64e2c108df34 100644
--- a/drivers/media/video/saa7134/saa7134-alsa.c
+++ b/drivers/media/video/saa7134/saa7134-alsa.c
@@ -308,8 +308,7 @@ static int dsp_buffer_init(struct saa7134_dev *dev)
308 308
309static int dsp_buffer_free(struct saa7134_dev *dev) 309static int dsp_buffer_free(struct saa7134_dev *dev)
310{ 310{
311 if (!dev->dmasound.blksize) 311 BUG_ON(!dev->dmasound.blksize);
312 BUG();
313 312
314 videobuf_dma_free(&dev->dmasound.dma); 313 videobuf_dma_free(&dev->dmasound.dma);
315 314
@@ -611,12 +610,12 @@ static int snd_card_saa7134_capture_open(snd_pcm_substream_t * substream)
611 struct saa7134_dev *dev = saa7134->dev; 610 struct saa7134_dev *dev = saa7134->dev;
612 int err; 611 int err;
613 612
614 down(&dev->dmasound.lock); 613 mutex_lock(&dev->dmasound.lock);
615 614
616 dev->dmasound.read_count = 0; 615 dev->dmasound.read_count = 0;
617 dev->dmasound.read_offset = 0; 616 dev->dmasound.read_offset = 0;
618 617
619 up(&dev->dmasound.lock); 618 mutex_unlock(&dev->dmasound.lock);
620 619
621 pcm = kzalloc(sizeof(*pcm), GFP_KERNEL); 620 pcm = kzalloc(sizeof(*pcm), GFP_KERNEL);
622 if (pcm == NULL) 621 if (pcm == NULL)
@@ -934,7 +933,7 @@ static int alsa_card_saa7134_create(struct saa7134_dev *dev, int devnum)
934 933
935 chip->irq = dev->pci->irq; 934 chip->irq = dev->pci->irq;
936 935
937 init_MUTEX(&dev->dmasound.lock); 936 mutex_init(&dev->dmasound.lock);
938 937
939 if ((err = snd_card_saa7134_new_mixer(chip)) < 0) 938 if ((err = snd_card_saa7134_new_mixer(chip)) < 0)
940 goto __nodev; 939 goto __nodev;
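The dsp_buffer_free() change above (and the matching one in saa7134-core.c further down) replaces an open-coded assertion with BUG_ON(); the two forms are equivalent:

    /* before */
    if (!dev->dmasound.blksize)
            BUG();

    /* after: same assertion in one line */
    BUG_ON(!dev->dmasound.blksize);
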
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index 6bc63a4086c1..fdd7f48f3b76 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -536,7 +536,7 @@ struct saa7134_board saa7134_boards[] = {
536 .radio = { 536 .radio = {
537 .name = name_radio, 537 .name = name_radio,
538 .amux = LINE2, 538 .amux = LINE2,
539 }, 539 },
540 }, 540 },
541 [SAA7134_BOARD_MD7134] = { 541 [SAA7134_BOARD_MD7134] = {
542 .name = "Medion 7134", 542 .name = "Medion 7134",
@@ -640,6 +640,32 @@ struct saa7134_board saa7134_boards[] = {
640 .tv = 1, 640 .tv = 1,
641 }}, 641 }},
642 }, 642 },
643 [SAA7134_BOARD_ELSA_700TV] = {
644 .name = "ELSA EX-VISION 700TV",
645 .audio_clock = 0x00187de7,
646 .tuner_type = TUNER_HITACHI_NTSC,
647 .radio_type = UNSET,
648 .tuner_addr = ADDR_UNSET,
649 .radio_addr = ADDR_UNSET,
650 .inputs = {{
651 .name = name_tv,
652 .vmux = 4,
653 .amux = LINE2,
654 .tv = 1,
655 },{
656 .name = name_comp1,
657 .vmux = 6,
658 .amux = LINE1,
659 },{
660 .name = name_svideo,
661 .vmux = 7,
662 .amux = LINE1,
663 }},
664 .mute = {
665 .name = name_mute,
666 .amux = TV,
667 },
668 },
643 [SAA7134_BOARD_ASUSTeK_TVFM7134] = { 669 [SAA7134_BOARD_ASUSTeK_TVFM7134] = {
644 .name = "ASUS TV-FM 7134", 670 .name = "ASUS TV-FM 7134",
645 .audio_clock = 0x00187de7, 671 .audio_clock = 0x00187de7,
@@ -2002,7 +2028,7 @@ struct saa7134_board saa7134_boards[] = {
2002 [SAA7134_BOARD_FLYTV_DIGIMATRIX] = { 2028 [SAA7134_BOARD_FLYTV_DIGIMATRIX] = {
2003 .name = "FlyTV mini Asus Digimatrix", 2029 .name = "FlyTV mini Asus Digimatrix",
2004 .audio_clock = 0x00200000, 2030 .audio_clock = 0x00200000,
2005 .tuner_type = TUNER_LG_NTSC_TALN_MINI, 2031 .tuner_type = TUNER_LG_TALN,
2006 .radio_type = UNSET, 2032 .radio_type = UNSET,
2007 .tuner_addr = ADDR_UNSET, 2033 .tuner_addr = ADDR_UNSET,
2008 .radio_addr = ADDR_UNSET, 2034 .radio_addr = ADDR_UNSET,
@@ -2598,6 +2624,7 @@ struct saa7134_board saa7134_boards[] = {
2598 .tuner_addr = ADDR_UNSET, 2624 .tuner_addr = ADDR_UNSET,
2599 .radio_addr = ADDR_UNSET, 2625 .radio_addr = ADDR_UNSET,
2600 .gpiomask = 0x00200000, 2626 .gpiomask = 0x00200000,
2627 .mpeg = SAA7134_MPEG_DVB,
2601 .inputs = {{ 2628 .inputs = {{
2602 .name = name_tv, /* Analog broadcast/cable TV */ 2629 .name = name_tv, /* Analog broadcast/cable TV */
2603 .vmux = 1, 2630 .vmux = 1,
@@ -2623,6 +2650,164 @@ struct saa7134_board saa7134_boards[] = {
2623 .gpio = 0x000000, /* GPIO21=Low for FM radio antenna */ 2650 .gpio = 0x000000, /* GPIO21=Low for FM radio antenna */
2624 }, 2651 },
2625 }, 2652 },
2653 [SAA7134_BOARD_AVERMEDIA_777] = {
2654 .name = "AverTV DVB-T 777",
2655 .audio_clock = 0x00187de7,
2656 .tuner_type = TUNER_ABSENT,
2657 .radio_type = UNSET,
2658 .tuner_addr = ADDR_UNSET,
2659 .radio_addr = ADDR_UNSET,
2660 .mpeg = SAA7134_MPEG_DVB,
2661 .inputs = {{
2662 .name = name_comp1,
2663 .vmux = 0,
2664 .amux = LINE1,
2665 },{
2666 .name = name_svideo,
2667 .vmux = 8,
2668 .amux = LINE1,
2669 }},
2670 },
2671 [SAA7134_BOARD_FLYDVBT_LR301] = {
2672 /* LifeView FlyDVB-T */
2673 /* Giampiero Giancipoli <gianci@libero.it> */
2674 .name = "LifeView FlyDVB-T",
2675 .audio_clock = 0x00200000,
2676 .tuner_type = TUNER_ABSENT,
2677 .radio_type = UNSET,
2678 .tuner_addr = ADDR_UNSET,
2679 .radio_addr = ADDR_UNSET,
2680 .mpeg = SAA7134_MPEG_DVB,
2681 .inputs = {{
2682 .name = name_comp1, /* Composite input */
2683 .vmux = 3,
2684 .amux = LINE2,
2685 },{
2686 .name = name_svideo, /* S-Video signal on S-Video input */
2687 .vmux = 8,
2688 .amux = LINE2,
2689 }},
2690 },
2691 [SAA7134_BOARD_ADS_DUO_CARDBUS_PTV331] = {
2692 .name = "ADS Instant TV Duo Cardbus PTV331",
2693 .audio_clock = 0x00200000,
2694 .tuner_type = TUNER_PHILIPS_TDA8290,
2695 .radio_type = UNSET,
2696 .tuner_addr = ADDR_UNSET,
2697 .radio_addr = ADDR_UNSET,
2698 .mpeg = SAA7134_MPEG_DVB,
2699 .gpiomask = 0x00600000, /* Bit 21 0=Radio, Bit 22 0=TV */
2700 .inputs = {{
2701 .name = name_tv,
2702 .vmux = 1,
2703 .amux = TV,
2704 .tv = 1,
2705 .gpio = 0x00200000,
2706 }},
2707 },
2708 [SAA7134_BOARD_TEVION_DVBT_220RF] = {
2709 .name = "Tevion/KWorld DVB-T 220RF",
2710 .audio_clock = 0x00187de7,
2711 .tuner_type = TUNER_PHILIPS_TDA8290,
2712 .radio_type = UNSET,
2713 .tuner_addr = ADDR_UNSET,
2714 .radio_addr = ADDR_UNSET,
2715 .mpeg = SAA7134_MPEG_DVB,
2716 .inputs = {{
2717 .name = name_tv,
2718 .vmux = 1,
2719 .amux = TV,
2720 .tv = 1,
2721 },{
2722 .name = name_comp1,
2723 .vmux = 3,
2724 .amux = LINE1,
2725 },{
2726 .name = name_svideo,
2727 .vmux = 0,
2728 .amux = LINE1,
2729 }},
2730 .radio = {
2731 .name = name_radio,
2732 .amux = LINE1,
2733 },
2734 },
2735 [SAA7134_BOARD_KWORLD_ATSC110] = {
2736 .name = "Kworld ATSC110",
2737 .audio_clock = 0x00187de7,
2738 .tuner_type = TUNER_PHILIPS_TUV1236D,
2739 .radio_type = UNSET,
2740 .tuner_addr = ADDR_UNSET,
2741 .radio_addr = ADDR_UNSET,
2742 .tda9887_conf = TDA9887_PRESENT,
2743 .mpeg = SAA7134_MPEG_DVB,
2744 .inputs = {{
2745 .name = name_tv,
2746 .vmux = 1,
2747 .amux = TV,
2748 .tv = 1,
2749 },{
2750 .name = name_comp1,
2751 .vmux = 3,
2752 .amux = LINE2,
2753 },{
2754 .name = name_svideo,
2755 .vmux = 8,
2756 .amux = LINE2,
2757 }},
2758 },
2759 [SAA7134_BOARD_AVERMEDIA_A169_B] = {
2760 /* AVerMedia A169 */
2761 /* Rickard Osser <ricky@osser.se> */
2762 /* This card has two saa7134 chips on it,
2763 but only one of them is currently working. */
2764 .name = "AVerMedia A169 B",
2765 .audio_clock = 0x02187de7,
2766 .tuner_type = TUNER_LG_TALN,
2767 .radio_type = UNSET,
2768 .tuner_addr = ADDR_UNSET,
2769 .radio_addr = ADDR_UNSET,
2770 .tda9887_conf = TDA9887_PRESENT,
2771 .gpiomask = 0x0a60000,
2772 },
2773 [SAA7134_BOARD_AVERMEDIA_A169_B1] = {
2774 /* AVerMedia A169 */
2775 /* Rickard Osser <ricky@osser.se> */
2776 .name = "AVerMedia A169 B1",
2777 .audio_clock = 0x02187de7,
2778 .tuner_type = TUNER_LG_TALN,
2779 .radio_type = UNSET,
2780 .tuner_addr = ADDR_UNSET,
2781 .radio_addr = ADDR_UNSET,
2782 .tda9887_conf = TDA9887_PRESENT,
2783 .gpiomask = 0xca60000,
2784 .inputs = {{
2785 .name = name_tv,
2786 .vmux = 4,
2787 .amux = TV,
2788 .tv = 1,
2789 .gpio = 0x04a61000,
2790 },{
2791 .name = name_comp2, /* Composite SVIDEO (B/W if signal is carried with SVIDEO) */
2792 .vmux = 1,
2793 .amux = LINE2,
2794 },{
2795 .name = name_svideo,
2796 .vmux = 9, /* 9 is correct as S-VIDEO1 according to a169.inf! */
2797 .amux = LINE1,
2798 }},
2799 },
2800 [SAA7134_BOARD_MD7134_BRIDGE_2] = {
2801 /* This card has two saa7134 chips on it,
2802 but only one of them is currently working.
2803 The programming for the primary decoder is
2804 in SAA7134_BOARD_MD7134 */
2805 .name = "Medion 7134 Bridge #2",
2806 .audio_clock = 0x00187de7,
2807 .radio_type = UNSET,
2808 .tuner_addr = ADDR_UNSET,
2809 .radio_addr = ADDR_UNSET,
2810 },
2626}; 2811};
2627 2812
2628const unsigned int saa7134_bcount = ARRAY_SIZE(saa7134_boards); 2813const unsigned int saa7134_bcount = ARRAY_SIZE(saa7134_boards);
@@ -2753,6 +2938,12 @@ struct pci_device_id saa7134_pci_tbl[] = {
2753 .driver_data = SAA7134_BOARD_ELSA_500TV, 2938 .driver_data = SAA7134_BOARD_ELSA_500TV,
2754 },{ 2939 },{
2755 .vendor = PCI_VENDOR_ID_PHILIPS, 2940 .vendor = PCI_VENDOR_ID_PHILIPS,
2941 .device = PCI_DEVICE_ID_PHILIPS_SAA7130,
2942 .subvendor = 0x1048,
2943 .subdevice = 0x226c,
2944 .driver_data = SAA7134_BOARD_ELSA_700TV,
2945 },{
2946 .vendor = PCI_VENDOR_ID_PHILIPS,
2756 .device = PCI_DEVICE_ID_PHILIPS_SAA7134, 2947 .device = PCI_DEVICE_ID_PHILIPS_SAA7134,
2757 .subvendor = PCI_VENDOR_ID_ASUSTEK, 2948 .subvendor = PCI_VENDOR_ID_ASUSTEK,
2758 .subdevice = 0x4842, 2949 .subdevice = 0x4842,
@@ -3094,6 +3285,54 @@ struct pci_device_id saa7134_pci_tbl[] = {
3094 .subdevice = 0x0319, 3285 .subdevice = 0x0319,
3095 .driver_data = SAA7134_BOARD_FLYDVB_TRIO, 3286 .driver_data = SAA7134_BOARD_FLYDVB_TRIO,
3096 },{ 3287 },{
3288 .vendor = PCI_VENDOR_ID_PHILIPS,
3289 .device = PCI_DEVICE_ID_PHILIPS_SAA7134, /* SAA 7131E */
3290 .subvendor = 0x1461,
3291 .subdevice = 0x2c05,
3292 .driver_data = SAA7134_BOARD_AVERMEDIA_777,
3293 },{
3294 .vendor = PCI_VENDOR_ID_PHILIPS,
3295 .device = PCI_DEVICE_ID_PHILIPS_SAA7134,
3296 .subvendor = 0x5168,
3297 .subdevice = 0x0301,
3298 .driver_data = SAA7134_BOARD_FLYDVBT_LR301,
3299 },{
3300 .vendor = PCI_VENDOR_ID_PHILIPS,
3301 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
3302 .subvendor = 0x0331,
3303 .subdevice = 0x1421,
3304 .driver_data = SAA7134_BOARD_ADS_DUO_CARDBUS_PTV331,
3305 },{
3306 .vendor = PCI_VENDOR_ID_PHILIPS,
3307 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
3308 .subvendor = 0x17de,
3309 .subdevice = 0x7201,
3310 .driver_data = SAA7134_BOARD_TEVION_DVBT_220RF,
3311 },{
3312 .vendor = PCI_VENDOR_ID_PHILIPS,
3313 .device = PCI_DEVICE_ID_PHILIPS_SAA7133, /* SAA7135HL */
3314 .subvendor = 0x17de,
3315 .subdevice = 0x7350,
3316 .driver_data = SAA7134_BOARD_KWORLD_ATSC110,
3317 },{
3318 .vendor = PCI_VENDOR_ID_PHILIPS,
3319 .device = PCI_DEVICE_ID_PHILIPS_SAA7134,
3320 .subvendor = 0x1461,
3321 .subdevice = 0x7360,
3322 .driver_data = SAA7134_BOARD_AVERMEDIA_A169_B,
3323 },{
3324 .vendor = PCI_VENDOR_ID_PHILIPS,
3325 .device = PCI_DEVICE_ID_PHILIPS_SAA7134,
3326 .subvendor = 0x1461,
3327 .subdevice = 0x6360,
3328 .driver_data = SAA7134_BOARD_AVERMEDIA_A169_B1,
3329 },{
3330 .vendor = PCI_VENDOR_ID_PHILIPS,
3331 .device = PCI_DEVICE_ID_PHILIPS_SAA7134,
3332 .subvendor = 0x16be,
3333 .subdevice = 0x0005,
3334 .driver_data = SAA7134_BOARD_MD7134_BRIDGE_2,
3335 },{
3097 /* --- boards without eeprom + subsystem ID --- */ 3336 /* --- boards without eeprom + subsystem ID --- */
3098 .vendor = PCI_VENDOR_ID_PHILIPS, 3337 .vendor = PCI_VENDOR_ID_PHILIPS,
3099 .device = PCI_DEVICE_ID_PHILIPS_SAA7134, 3338 .device = PCI_DEVICE_ID_PHILIPS_SAA7134,
@@ -3193,13 +3432,15 @@ int saa7134_board_init1(struct saa7134_dev *dev)
3193 case SAA7134_BOARD_GOTVIEW_7135: 3432 case SAA7134_BOARD_GOTVIEW_7135:
3194 case SAA7134_BOARD_KWORLD_TERMINATOR: 3433 case SAA7134_BOARD_KWORLD_TERMINATOR:
3195 case SAA7134_BOARD_SEDNA_PC_TV_CARDBUS: 3434 case SAA7134_BOARD_SEDNA_PC_TV_CARDBUS:
3435 case SAA7134_BOARD_FLYDVBT_LR301:
3436 case SAA7134_BOARD_FLYDVBTDUO:
3196 dev->has_remote = SAA7134_REMOTE_GPIO; 3437 dev->has_remote = SAA7134_REMOTE_GPIO;
3197 break; 3438 break;
3198 case SAA7134_BOARD_MD5044: 3439 case SAA7134_BOARD_MD5044:
3199 printk("%s: seems there are two different versions of the MD5044\n" 3440 printk("%s: seems there are two different versions of the MD5044\n"
3200 "%s: (with the same ID) out there. If sound doesn't work for\n" 3441 "%s: (with the same ID) out there. If sound doesn't work for\n"
3201 "%s: you try the audio_clock_override=0x200000 insmod option.\n", 3442 "%s: you try the audio_clock_override=0x200000 insmod option.\n",
3202 dev->name,dev->name,dev->name); 3443 dev->name,dev->name,dev->name);
3203 break; 3444 break;
3204 case SAA7134_BOARD_CINERGY400_CARDBUS: 3445 case SAA7134_BOARD_CINERGY400_CARDBUS:
3205 /* power-up tuner chip */ 3446 /* power-up tuner chip */
@@ -3220,6 +3461,10 @@ int saa7134_board_init1(struct saa7134_dev *dev)
3220 saa_writeb(SAA7134_GPIO_GPMODE3, 0x08); 3461 saa_writeb(SAA7134_GPIO_GPMODE3, 0x08);
3221 saa_writeb(SAA7134_GPIO_GPSTATUS3, 0x06); 3462 saa_writeb(SAA7134_GPIO_GPSTATUS3, 0x06);
3222 break; 3463 break;
3464 case SAA7134_BOARD_ADS_DUO_CARDBUS_PTV331:
3465 saa_writeb(SAA7134_GPIO_GPMODE3, 0x08);
3466 saa_writeb(SAA7134_GPIO_GPSTATUS3, 0x00);
3467 break;
3223 case SAA7134_BOARD_AVERMEDIA_CARDBUS: 3468 case SAA7134_BOARD_AVERMEDIA_CARDBUS:
3224 /* power-up tuner chip */ 3469 /* power-up tuner chip */
3225 saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0xffffffff, 0xffffffff); 3470 saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0xffffffff, 0xffffffff);
@@ -3242,6 +3487,13 @@ int saa7134_board_init1(struct saa7134_dev *dev)
3242 case SAA7134_BOARD_UPMOST_PURPLE_TV: 3487 case SAA7134_BOARD_UPMOST_PURPLE_TV:
3243 dev->has_remote = SAA7134_REMOTE_I2C; 3488 dev->has_remote = SAA7134_REMOTE_I2C;
3244 break; 3489 break;
3490 case SAA7134_BOARD_AVERMEDIA_A169_B:
3491 case SAA7134_BOARD_MD7134_BRIDGE_2:
3492 printk("%s: %s: dual saa713x broadcast decoders\n"
3493 "%s: Sorry, none of the inputs to this chip are supported yet.\n"
3494 "%s: Dual decoder functionality is disabled for now, use the other chip.\n",
3495 dev->name,card(dev).name,dev->name,dev->name);
3496 break;
3245 } 3497 }
3246 return 0; 3498 return 0;
3247} 3499}
@@ -3362,14 +3614,44 @@ int saa7134_board_init2(struct saa7134_dev *dev)
3362 } 3614 }
3363 break; 3615 break;
3364 case SAA7134_BOARD_PHILIPS_TIGER: 3616 case SAA7134_BOARD_PHILIPS_TIGER:
3617 case SAA7134_BOARD_TEVION_DVBT_220RF:
3365 case SAA7134_BOARD_ASUSTeK_P7131_DUAL: 3618 case SAA7134_BOARD_ASUSTeK_P7131_DUAL:
3366 /* this is a hybrid board, initialize to analog mode */ 3619 /* this is a hybrid board, initialize to analog mode
3620 * and configure firmware eeprom address
3621 */
3367 { 3622 {
3368 u8 data[] = { 0x3c, 0x33, 0x68}; 3623 u8 data[] = { 0x3c, 0x33, 0x68};
3369 struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)}; 3624 struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)};
3370 i2c_transfer(&dev->i2c_adap, &msg, 1); 3625 i2c_transfer(&dev->i2c_adap, &msg, 1);
3371 } 3626 }
3372 break; 3627 break;
3628 case SAA7134_BOARD_FLYDVB_TRIO:
3629 {
3630 u8 data[] = { 0x3c, 0x33, 0x62};
3631 struct i2c_msg msg = {.addr=0x09, .flags=0, .buf=data, .len = sizeof(data)};
3632 i2c_transfer(&dev->i2c_adap, &msg, 1);
3633 }
3634 break;
3635 case SAA7134_BOARD_ADS_DUO_CARDBUS_PTV331:
3636 /* make the tda10046 find its eeprom */
3637 {
3638 u8 data[] = { 0x3c, 0x33, 0x62};
3639 struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)};
3640 i2c_transfer(&dev->i2c_adap, &msg, 1);
3641 }
3642 break;
3643 case SAA7134_BOARD_KWORLD_ATSC110:
3644 {
3645 /* enable tuner */
3646 int i;
 3647 static const u8 buffer[] = { 0x10,0x12,0x13,0x04,0x16,0x00,0x14,0x04,0x17,0x00 };
3648 dev->i2c_client.addr = 0x0a;
3649 for (i = 0; i < 5; i++)
3650 if (2 != i2c_master_send(&dev->i2c_client,&buffer[i*2],2))
3651 printk(KERN_WARNING "%s: Unable to enable tuner(%i).\n",
3652 dev->name, i);
3653 }
3654 break;
3373 } 3655 }
3374 return 0; 3656 return 0;
3375} 3657}
diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c
index 028904bd94a2..58e568d7d2ee 100644
--- a/drivers/media/video/saa7134/saa7134-core.c
+++ b/drivers/media/video/saa7134/saa7134-core.c
@@ -66,6 +66,11 @@ static unsigned int latency = UNSET;
66module_param(latency, int, 0444); 66module_param(latency, int, 0444);
67MODULE_PARM_DESC(latency,"pci latency timer"); 67MODULE_PARM_DESC(latency,"pci latency timer");
68 68
69int saa7134_no_overlay=-1;
70module_param_named(no_overlay, saa7134_no_overlay, int, 0444);
 71MODULE_PARM_DESC(no_overlay,"allow overriding the overlay default (0 disables, 1 enables)"
 72 " [some VIA/SIS chipsets are known to have problems with overlay]");
73
69static unsigned int video_nr[] = {[0 ... (SAA7134_MAXBOARDS - 1)] = UNSET }; 74static unsigned int video_nr[] = {[0 ... (SAA7134_MAXBOARDS - 1)] = UNSET };
70static unsigned int vbi_nr[] = {[0 ... (SAA7134_MAXBOARDS - 1)] = UNSET }; 75static unsigned int vbi_nr[] = {[0 ... (SAA7134_MAXBOARDS - 1)] = UNSET };
71static unsigned int radio_nr[] = {[0 ... (SAA7134_MAXBOARDS - 1)] = UNSET }; 76static unsigned int radio_nr[] = {[0 ... (SAA7134_MAXBOARDS - 1)] = UNSET };
@@ -251,8 +256,7 @@ void saa7134_pgtable_free(struct pci_dev *pci, struct saa7134_pgtable *pt)
251 256
252void saa7134_dma_free(struct saa7134_dev *dev,struct saa7134_buf *buf) 257void saa7134_dma_free(struct saa7134_dev *dev,struct saa7134_buf *buf)
253{ 258{
254 if (in_interrupt()) 259 BUG_ON(in_interrupt());
255 BUG();
256 260
257 videobuf_waiton(&buf->vb,0,0); 261 videobuf_waiton(&buf->vb,0,0);
258 videobuf_dma_pci_unmap(dev->pci, &buf->vb.dma); 262 videobuf_dma_pci_unmap(dev->pci, &buf->vb.dma);
@@ -613,7 +617,7 @@ static int saa7134_hwinit1(struct saa7134_dev *dev)
613 617
614 saa_writel(SAA7134_IRQ1, 0); 618 saa_writel(SAA7134_IRQ1, 0);
615 saa_writel(SAA7134_IRQ2, 0); 619 saa_writel(SAA7134_IRQ2, 0);
616 init_MUTEX(&dev->lock); 620 mutex_init(&dev->lock);
617 spin_lock_init(&dev->slock); 621 spin_lock_init(&dev->slock);
618 622
619 saa7134_track_gpio(dev,"pre-init"); 623 saa7134_track_gpio(dev,"pre-init");
@@ -835,6 +839,22 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
835 latency = 0x0A; 839 latency = 0x0A;
836 } 840 }
837#endif 841#endif
842 if (pci_pci_problems & PCIPCI_FAIL) {
843 printk(KERN_INFO "%s: quirk: this driver and your "
844 "chipset may not work together"
845 " in overlay mode.\n",dev->name);
846 if (!saa7134_no_overlay) {
847 printk(KERN_INFO "%s: quirk: overlay "
848 "mode will be disabled.\n",
849 dev->name);
850 saa7134_no_overlay = 1;
851 } else {
852 printk(KERN_INFO "%s: quirk: overlay "
853 "mode will be forced. Use this"
854 " option at your own risk.\n",
855 dev->name);
856 }
857 }
838 } 858 }
839 if (UNSET != latency) { 859 if (UNSET != latency) {
840 printk(KERN_INFO "%s: setting pci latency timer to %d\n", 860 printk(KERN_INFO "%s: setting pci latency timer to %d\n",
@@ -937,6 +957,11 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
937 v4l2_prio_init(&dev->prio); 957 v4l2_prio_init(&dev->prio);
938 958
939 /* register v4l devices */ 959 /* register v4l devices */
960 if (saa7134_no_overlay <= 0) {
961 saa7134_video_template.type |= VID_TYPE_OVERLAY;
962 } else {
963 printk("bttv: Overlay support disabled.\n");
964 }
940 dev->video_dev = vdev_init(dev,&saa7134_video_template,"video"); 965 dev->video_dev = vdev_init(dev,&saa7134_video_template,"video");
941 err = video_register_device(dev->video_dev,VFL_TYPE_GRABBER, 966 err = video_register_device(dev->video_dev,VFL_TYPE_GRABBER,
942 video_nr[dev->nr]); 967 video_nr[dev->nr]);
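Since the video device only advertises VID_TYPE_OVERLAY when saa7134_no_overlay <= 0, overlay support can be disabled at load time; assuming the driver is built as a module, passing no_overlay=1 (e.g. "modprobe saa7134 no_overlay=1") turns it off.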
diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c
index 9db8e13f21c3..86cfdb8514cb 100644
--- a/drivers/media/video/saa7134/saa7134-dvb.c
+++ b/drivers/media/video/saa7134/saa7134-dvb.c
@@ -32,6 +32,7 @@
32#include "saa7134-reg.h" 32#include "saa7134-reg.h"
33#include "saa7134.h" 33#include "saa7134.h"
34#include <media/v4l2-common.h> 34#include <media/v4l2-common.h>
35#include "dvb-pll.h"
35 36
36#ifdef HAVE_MT352 37#ifdef HAVE_MT352
37# include "mt352.h" 38# include "mt352.h"
@@ -42,7 +43,6 @@
42#endif 43#endif
43#ifdef HAVE_NXT200X 44#ifdef HAVE_NXT200X
44# include "nxt200x.h" 45# include "nxt200x.h"
45# include "dvb-pll.h"
46#endif 46#endif
47 47
48MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]"); 48MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
@@ -114,6 +114,24 @@ static int mt352_pinnacle_init(struct dvb_frontend* fe)
114 return 0; 114 return 0;
115} 115}
116 116
117static int mt352_aver777_init(struct dvb_frontend* fe)
118{
119 static u8 clock_config [] = { CLOCK_CTL, 0x38, 0x2d };
120 static u8 reset [] = { RESET, 0x80 };
121 static u8 adc_ctl_1_cfg [] = { ADC_CTL_1, 0x40 };
122 static u8 agc_cfg [] = { AGC_TARGET, 0x28, 0xa0 };
123 static u8 capt_range_cfg[] = { CAPT_RANGE, 0x33 };
124
125 mt352_write(fe, clock_config, sizeof(clock_config));
126 udelay(200);
127 mt352_write(fe, reset, sizeof(reset));
128 mt352_write(fe, adc_ctl_1_cfg, sizeof(adc_ctl_1_cfg));
129 mt352_write(fe, agc_cfg, sizeof(agc_cfg));
130 mt352_write(fe, capt_range_cfg, sizeof(capt_range_cfg));
131
132 return 0;
133}
134
117static int mt352_pinnacle_pll_set(struct dvb_frontend* fe, 135static int mt352_pinnacle_pll_set(struct dvb_frontend* fe,
118 struct dvb_frontend_parameters* params, 136 struct dvb_frontend_parameters* params,
119 u8* pllbuf) 137 u8* pllbuf)
@@ -146,6 +164,15 @@ static int mt352_pinnacle_pll_set(struct dvb_frontend* fe,
146 return 0; 164 return 0;
147} 165}
148 166
167static int mt352_aver777_pll_set(struct dvb_frontend *fe, struct dvb_frontend_parameters *params, u8* pllbuf)
168{
169 pllbuf[0] = 0xc2;
170 dvb_pll_configure(&dvb_pll_philips_td1316, pllbuf+1,
171 params->frequency,
172 params->u.ofdm.bandwidth);
173 return 0;
174}
175
149static struct mt352_config pinnacle_300i = { 176static struct mt352_config pinnacle_300i = {
150 .demod_address = 0x3c >> 1, 177 .demod_address = 0x3c >> 1,
151 .adc_clock = 20333, 178 .adc_clock = 20333,
@@ -154,6 +181,12 @@ static struct mt352_config pinnacle_300i = {
154 .demod_init = mt352_pinnacle_init, 181 .demod_init = mt352_pinnacle_init,
155 .pll_set = mt352_pinnacle_pll_set, 182 .pll_set = mt352_pinnacle_pll_set,
156}; 183};
184
185static struct mt352_config avermedia_777 = {
186 .demod_address = 0xf,
187 .demod_init = mt352_aver777_init,
188 .pll_set = mt352_aver777_pll_set,
189};
157#endif 190#endif
158 191
159/* ------------------------------------------------------------------ */ 192/* ------------------------------------------------------------------ */
@@ -781,7 +814,7 @@ static int philips_tiger_pll_set(struct dvb_frontend *fe, struct dvb_frontend_pa
781 tda8290_msg.buf = tda8290_open; 814 tda8290_msg.buf = tda8290_open;
782 i2c_transfer(&dev->i2c_adap, &tda8290_msg, 1); 815 i2c_transfer(&dev->i2c_adap, &tda8290_msg, 1);
783 return ret; 816 return ret;
784}; 817}
785 818
786static int philips_tiger_dvb_mode(struct dvb_frontend *fe) 819static int philips_tiger_dvb_mode(struct dvb_frontend *fe)
787{ 820{
@@ -817,6 +850,110 @@ static struct tda1004x_config philips_tiger_config = {
817 .request_firmware = NULL, 850 .request_firmware = NULL,
818}; 851};
819 852
853/* ------------------------------------------------------------------ */
854
855static int lifeview_trio_pll_set(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
856{
857 int ret;
858
859 ret = philips_tda827xa_pll_set(0x60, fe, params);
860 return ret;
861}
862
863static int lifeview_trio_dvb_mode(struct dvb_frontend *fe)
864{
865 return 0;
866}
867
868static void lifeview_trio_analog_mode(struct dvb_frontend *fe)
869{
870 philips_tda827xa_pll_sleep(0x60, fe);
871}
872
873static struct tda1004x_config lifeview_trio_config = {
874 .demod_address = 0x09,
875 .invert = 1,
876 .invert_oclk = 0,
877 .xtal_freq = TDA10046_XTAL_16M,
878 .agc_config = TDA10046_AGC_TDA827X_GPL,
879 .if_freq = TDA10046_FREQ_045,
880 .pll_init = lifeview_trio_dvb_mode,
881 .pll_set = lifeview_trio_pll_set,
882 .pll_sleep = lifeview_trio_analog_mode,
883 .request_firmware = NULL,
884};
885
886/* ------------------------------------------------------------------ */
887
888static int ads_duo_pll_set(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
889{
890 int ret;
891
892 ret = philips_tda827xa_pll_set(0x61, fe, params);
893 return ret;
894}
895
896static int ads_duo_dvb_mode(struct dvb_frontend *fe)
897{
898 struct saa7134_dev *dev = fe->dvb->priv;
899 /* route TDA8275a AGC input to the channel decoder */
900 saa_writeb(SAA7134_GPIO_GPSTATUS2, 0x60);
901 return 0;
902}
903
904static void ads_duo_analog_mode(struct dvb_frontend *fe)
905{
906 struct saa7134_dev *dev = fe->dvb->priv;
907 /* route TDA8275a AGC input to the analog IF chip*/
908 saa_writeb(SAA7134_GPIO_GPSTATUS2, 0x20);
909 philips_tda827xa_pll_sleep( 0x61, fe);
910}
911
912static struct tda1004x_config ads_tech_duo_config = {
913 .demod_address = 0x08,
914 .invert = 1,
915 .invert_oclk = 0,
916 .xtal_freq = TDA10046_XTAL_16M,
917 .agc_config = TDA10046_AGC_TDA827X_GPL,
918 .if_freq = TDA10046_FREQ_045,
919 .pll_init = ads_duo_dvb_mode,
920 .pll_set = ads_duo_pll_set,
921 .pll_sleep = ads_duo_analog_mode,
922 .request_firmware = NULL,
923};
924
925/* ------------------------------------------------------------------ */
926
927static int tevion_dvb220rf_pll_set(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
928{
929 int ret;
930 ret = philips_tda827xa_pll_set(0x60, fe, params);
931 return ret;
932}
933
934static int tevion_dvb220rf_pll_init(struct dvb_frontend *fe)
935{
936 return 0;
937}
938
939static void tevion_dvb220rf_pll_sleep(struct dvb_frontend *fe)
940{
941 philips_tda827xa_pll_sleep( 0x61, fe);
942}
943
944static struct tda1004x_config tevion_dvbt220rf_config = {
945 .demod_address = 0x08,
946 .invert = 1,
947 .invert_oclk = 0,
948 .xtal_freq = TDA10046_XTAL_16M,
949 .agc_config = TDA10046_AGC_TDA827X,
950 .if_freq = TDA10046_FREQ_045,
951 .pll_init = tevion_dvb220rf_pll_init,
952 .pll_set = tevion_dvb220rf_pll_set,
953 .pll_sleep = tevion_dvb220rf_pll_sleep,
954 .request_firmware = NULL,
955};
956
820#endif 957#endif
821 958
822/* ------------------------------------------------------------------ */ 959/* ------------------------------------------------------------------ */
@@ -827,6 +964,22 @@ static struct nxt200x_config avertvhda180 = {
827 .pll_address = 0x61, 964 .pll_address = 0x61,
828 .pll_desc = &dvb_pll_tdhu2, 965 .pll_desc = &dvb_pll_tdhu2,
829}; 966};
967
968static int nxt200x_set_pll_input(u8 *buf, int input)
969{
970 if (input)
971 buf[3] |= 0x08;
972 else
973 buf[3] &= ~0x08;
974 return 0;
975}
976
977static struct nxt200x_config kworldatsc110 = {
978 .demod_address = 0x0a,
979 .pll_address = 0x61,
980 .pll_desc = &dvb_pll_tuv1236d,
981 .set_pll_input = nxt200x_set_pll_input,
982};
830#endif 983#endif
831 984
832/* ------------------------------------------------------------------ */ 985/* ------------------------------------------------------------------ */
@@ -851,6 +1004,12 @@ static int dvb_init(struct saa7134_dev *dev)
851 dev->dvb.frontend = mt352_attach(&pinnacle_300i, 1004 dev->dvb.frontend = mt352_attach(&pinnacle_300i,
852 &dev->i2c_adap); 1005 &dev->i2c_adap);
853 break; 1006 break;
1007
1008 case SAA7134_BOARD_AVERMEDIA_777:
1009 printk("%s: avertv 777 dvb setup\n",dev->name);
1010 dev->dvb.frontend = mt352_attach(&avermedia_777,
1011 &dev->i2c_adap);
1012 break;
854#endif 1013#endif
855#ifdef HAVE_TDA1004X 1014#ifdef HAVE_TDA1004X
856 case SAA7134_BOARD_MD7134: 1015 case SAA7134_BOARD_MD7134:
@@ -889,11 +1048,30 @@ static int dvb_init(struct saa7134_dev *dev)
889 dev->dvb.frontend = tda10046_attach(&philips_tiger_config, 1048 dev->dvb.frontend = tda10046_attach(&philips_tiger_config,
890 &dev->i2c_adap); 1049 &dev->i2c_adap);
891 break; 1050 break;
1051 case SAA7134_BOARD_FLYDVBT_LR301:
1052 dev->dvb.frontend = tda10046_attach(&tda827x_lifeview_config,
1053 &dev->i2c_adap);
1054 break;
1055 case SAA7134_BOARD_FLYDVB_TRIO:
1056 dev->dvb.frontend = tda10046_attach(&lifeview_trio_config,
1057 &dev->i2c_adap);
1058 break;
1059 case SAA7134_BOARD_ADS_DUO_CARDBUS_PTV331:
1060 dev->dvb.frontend = tda10046_attach(&ads_tech_duo_config,
1061 &dev->i2c_adap);
1062 break;
1063 case SAA7134_BOARD_TEVION_DVBT_220RF:
1064 dev->dvb.frontend = tda10046_attach(&tevion_dvbt220rf_config,
1065 &dev->i2c_adap);
1066 break;
892#endif 1067#endif
893#ifdef HAVE_NXT200X 1068#ifdef HAVE_NXT200X
894 case SAA7134_BOARD_AVERMEDIA_AVERTVHD_A180: 1069 case SAA7134_BOARD_AVERMEDIA_AVERTVHD_A180:
895 dev->dvb.frontend = nxt200x_attach(&avertvhda180, &dev->i2c_adap); 1070 dev->dvb.frontend = nxt200x_attach(&avertvhda180, &dev->i2c_adap);
896 break; 1071 break;
1072 case SAA7134_BOARD_KWORLD_ATSC110:
1073 dev->dvb.frontend = nxt200x_attach(&kworldatsc110, &dev->i2c_adap);
1074 break;
897#endif 1075#endif
898 default: 1076 default:
899 printk("%s: Huh? unknown DVB card?\n",dev->name); 1077 printk("%s: Huh? unknown DVB card?\n",dev->name);
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c
index bd4c389d4c37..1d972edb3be6 100644
--- a/drivers/media/video/saa7134/saa7134-empress.c
+++ b/drivers/media/video/saa7134/saa7134-empress.c
@@ -89,7 +89,7 @@ static int ts_open(struct inode *inode, struct file *file)
89 89
90 dprintk("open minor=%d\n",minor); 90 dprintk("open minor=%d\n",minor);
91 err = -EBUSY; 91 err = -EBUSY;
92 if (down_trylock(&dev->empress_tsq.lock)) 92 if (!mutex_trylock(&dev->empress_tsq.lock))
93 goto done; 93 goto done;
94 if (dev->empress_users) 94 if (dev->empress_users)
95 goto done_up; 95 goto done_up;
@@ -99,7 +99,7 @@ static int ts_open(struct inode *inode, struct file *file)
99 err = 0; 99 err = 0;
100 100
101done_up: 101done_up:
102 up(&dev->empress_tsq.lock); 102 mutex_unlock(&dev->empress_tsq.lock);
103done: 103done:
104 return err; 104 return err;
105} 105}
@@ -110,7 +110,7 @@ static int ts_release(struct inode *inode, struct file *file)
110 110
111 if (dev->empress_tsq.streaming) 111 if (dev->empress_tsq.streaming)
112 videobuf_streamoff(&dev->empress_tsq); 112 videobuf_streamoff(&dev->empress_tsq);
113 down(&dev->empress_tsq.lock); 113 mutex_lock(&dev->empress_tsq.lock);
114 if (dev->empress_tsq.reading) 114 if (dev->empress_tsq.reading)
115 videobuf_read_stop(&dev->empress_tsq); 115 videobuf_read_stop(&dev->empress_tsq);
116 videobuf_mmap_free(&dev->empress_tsq); 116 videobuf_mmap_free(&dev->empress_tsq);
@@ -119,7 +119,7 @@ static int ts_release(struct inode *inode, struct file *file)
119 /* stop the encoder */ 119 /* stop the encoder */
120 ts_reset_encoder(dev); 120 ts_reset_encoder(dev);
121 121
122 up(&dev->empress_tsq.lock); 122 mutex_unlock(&dev->empress_tsq.lock);
123 return 0; 123 return 0;
124} 124}
125 125
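Note the inverted condition in ts_open() above: down_trylock() returns 0 when it takes the semaphore, while mutex_trylock() returns 1 when it takes the mutex, so the conversion has to negate the test:

    /* before: zero means the lock was acquired */
    if (down_trylock(&dev->empress_tsq.lock))
            goto done;

    /* after: non-zero means the lock was acquired */
    if (!mutex_trylock(&dev->empress_tsq.lock))
            goto done;
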
diff --git a/drivers/media/video/saa7134/saa7134-input.c b/drivers/media/video/saa7134/saa7134-input.c
index 82d28cbf289f..1426e4c8602f 100644
--- a/drivers/media/video/saa7134/saa7134-input.c
+++ b/drivers/media/video/saa7134/saa7134-input.c
@@ -42,485 +42,6 @@ MODULE_PARM_DESC(ir_debug,"enable debug messages [IR]");
42#define i2cdprintk(fmt, arg...) if (ir_debug) \ 42#define i2cdprintk(fmt, arg...) if (ir_debug) \
43 printk(KERN_DEBUG "%s/ir: " fmt, ir->c.name , ## arg) 43 printk(KERN_DEBUG "%s/ir: " fmt, ir->c.name , ## arg)
44 44
45/* ---------------------------------------------------------------------- */
46
47static IR_KEYTAB_TYPE flyvideo_codes[IR_KEYTAB_SIZE] = {
48 [ 15 ] = KEY_KP0,
49 [ 3 ] = KEY_KP1,
50 [ 4 ] = KEY_KP2,
51 [ 5 ] = KEY_KP3,
52 [ 7 ] = KEY_KP4,
53 [ 8 ] = KEY_KP5,
54 [ 9 ] = KEY_KP6,
55 [ 11 ] = KEY_KP7,
56 [ 12 ] = KEY_KP8,
57 [ 13 ] = KEY_KP9,
58
59 [ 14 ] = KEY_MODE, // Air/Cable
60 [ 17 ] = KEY_VIDEO, // Video
61 [ 21 ] = KEY_AUDIO, // Audio
62 [ 0 ] = KEY_POWER, // Power
63 [ 24 ] = KEY_TUNER, // AV Source
64 [ 2 ] = KEY_ZOOM, // Fullscreen
65 [ 26 ] = KEY_LANGUAGE, // Stereo
66 [ 27 ] = KEY_MUTE, // Mute
67 [ 20 ] = KEY_VOLUMEUP, // Volume +
68 [ 23 ] = KEY_VOLUMEDOWN, // Volume -
69 [ 18 ] = KEY_CHANNELUP, // Channel +
70 [ 19 ] = KEY_CHANNELDOWN, // Channel -
71 [ 6 ] = KEY_AGAIN, // Recall
72 [ 16 ] = KEY_ENTER, // Enter
73};
74
75
76static IR_KEYTAB_TYPE cinergy_codes[IR_KEYTAB_SIZE] = {
77 [ 0 ] = KEY_KP0,
78 [ 1 ] = KEY_KP1,
79 [ 2 ] = KEY_KP2,
80 [ 3 ] = KEY_KP3,
81 [ 4 ] = KEY_KP4,
82 [ 5 ] = KEY_KP5,
83 [ 6 ] = KEY_KP6,
84 [ 7 ] = KEY_KP7,
85 [ 8 ] = KEY_KP8,
86 [ 9 ] = KEY_KP9,
87
88 [ 0x0a ] = KEY_POWER,
89 [ 0x0b ] = KEY_PROG1, // app
90 [ 0x0c ] = KEY_ZOOM, // zoom/fullscreen
91 [ 0x0d ] = KEY_CHANNELUP, // channel
92 [ 0x0e ] = KEY_CHANNELDOWN, // channel-
93 [ 0x0f ] = KEY_VOLUMEUP,
94 [ 0x10 ] = KEY_VOLUMEDOWN,
95 [ 0x11 ] = KEY_TUNER, // AV
96 [ 0x12 ] = KEY_NUMLOCK, // -/--
97 [ 0x13 ] = KEY_AUDIO, // audio
98 [ 0x14 ] = KEY_MUTE,
99 [ 0x15 ] = KEY_UP,
100 [ 0x16 ] = KEY_DOWN,
101 [ 0x17 ] = KEY_LEFT,
102 [ 0x18 ] = KEY_RIGHT,
103 [ 0x19 ] = BTN_LEFT,
104 [ 0x1a ] = BTN_RIGHT,
105 [ 0x1b ] = KEY_WWW, // text
106 [ 0x1c ] = KEY_REWIND,
107 [ 0x1d ] = KEY_FORWARD,
108 [ 0x1e ] = KEY_RECORD,
109 [ 0x1f ] = KEY_PLAY,
110 [ 0x20 ] = KEY_PREVIOUSSONG,
111 [ 0x21 ] = KEY_NEXTSONG,
112 [ 0x22 ] = KEY_PAUSE,
113 [ 0x23 ] = KEY_STOP,
114};
115
116/* Alfons Geser <a.geser@cox.net>
117 * updates from Job D. R. Borges <jobdrb@ig.com.br> */
118static IR_KEYTAB_TYPE eztv_codes[IR_KEYTAB_SIZE] = {
119 [ 18 ] = KEY_POWER,
120 [ 1 ] = KEY_TV, // DVR
121 [ 21 ] = KEY_DVD, // DVD
122 [ 23 ] = KEY_AUDIO, // music
123 // DVR mode / DVD mode / music mode
124
125 [ 27 ] = KEY_MUTE, // mute
126 [ 2 ] = KEY_LANGUAGE, // MTS/SAP / audio / autoseek
127 [ 30 ] = KEY_SUBTITLE, // closed captioning / subtitle / seek
128 [ 22 ] = KEY_ZOOM, // full screen
129 [ 28 ] = KEY_VIDEO, // video source / eject / delall
130 [ 29 ] = KEY_RESTART, // playback / angle / del
131 [ 47 ] = KEY_SEARCH, // scan / menu / playlist
132 [ 48 ] = KEY_CHANNEL, // CH surfing / bookmark / memo
133
134 [ 49 ] = KEY_HELP, // help
135 [ 50 ] = KEY_MODE, // num/memo
136 [ 51 ] = KEY_ESC, // cancel
137
138 [ 12 ] = KEY_UP, // up
139 [ 16 ] = KEY_DOWN, // down
140 [ 8 ] = KEY_LEFT, // left
141 [ 4 ] = KEY_RIGHT, // right
142 [ 3 ] = KEY_SELECT, // select
143
144 [ 31 ] = KEY_REWIND, // rewind
145 [ 32 ] = KEY_PLAYPAUSE, // play/pause
146 [ 41 ] = KEY_FORWARD, // forward
147 [ 20 ] = KEY_AGAIN, // repeat
148 [ 43 ] = KEY_RECORD, // recording
149 [ 44 ] = KEY_STOP, // stop
150 [ 45 ] = KEY_PLAY, // play
151 [ 46 ] = KEY_SHUFFLE, // snapshot / shuffle
152
153 [ 0 ] = KEY_KP0,
154 [ 5 ] = KEY_KP1,
155 [ 6 ] = KEY_KP2,
156 [ 7 ] = KEY_KP3,
157 [ 9 ] = KEY_KP4,
158 [ 10 ] = KEY_KP5,
159 [ 11 ] = KEY_KP6,
160 [ 13 ] = KEY_KP7,
161 [ 14 ] = KEY_KP8,
162 [ 15 ] = KEY_KP9,
163
164 [ 42 ] = KEY_VOLUMEUP,
165 [ 17 ] = KEY_VOLUMEDOWN,
166 [ 24 ] = KEY_CHANNELUP, // CH.tracking up
167 [ 25 ] = KEY_CHANNELDOWN, // CH.tracking down
168
169 [ 19 ] = KEY_KPENTER, // enter
170 [ 33 ] = KEY_KPDOT, // . (decimal dot)
171};
172
173static IR_KEYTAB_TYPE avacssmart_codes[IR_KEYTAB_SIZE] = {
174 [ 30 ] = KEY_POWER, // power
175 [ 28 ] = KEY_SEARCH, // scan
176 [ 7 ] = KEY_SELECT, // source
177
178 [ 22 ] = KEY_VOLUMEUP,
179 [ 20 ] = KEY_VOLUMEDOWN,
180 [ 31 ] = KEY_CHANNELUP,
181 [ 23 ] = KEY_CHANNELDOWN,
182 [ 24 ] = KEY_MUTE,
183
184 [ 2 ] = KEY_KP0,
185 [ 1 ] = KEY_KP1,
186 [ 11 ] = KEY_KP2,
187 [ 27 ] = KEY_KP3,
188 [ 5 ] = KEY_KP4,
189 [ 9 ] = KEY_KP5,
190 [ 21 ] = KEY_KP6,
191 [ 6 ] = KEY_KP7,
192 [ 10 ] = KEY_KP8,
193 [ 18 ] = KEY_KP9,
194 [ 16 ] = KEY_KPDOT,
195
196 [ 3 ] = KEY_TUNER, // tv/fm
197 [ 4 ] = KEY_REWIND, // fm tuning left or function left
198 [ 12 ] = KEY_FORWARD, // fm tuning right or function right
199
200 [ 0 ] = KEY_RECORD,
201 [ 8 ] = KEY_STOP,
202 [ 17 ] = KEY_PLAY,
203
204 [ 25 ] = KEY_ZOOM,
205 [ 14 ] = KEY_MENU, // function
206 [ 19 ] = KEY_AGAIN, // recall
207 [ 29 ] = KEY_RESTART, // reset
208 [ 26 ] = KEY_SHUFFLE, // snapshot/shuffle
209
210// FIXME
211 [ 13 ] = KEY_F21, // mts
212 [ 15 ] = KEY_F22, // min
213};
214
215/* Alex Hermann <gaaf@gmx.net> */
216static IR_KEYTAB_TYPE md2819_codes[IR_KEYTAB_SIZE] = {
217 [ 40 ] = KEY_KP1,
218 [ 24 ] = KEY_KP2,
219 [ 56 ] = KEY_KP3,
220 [ 36 ] = KEY_KP4,
221 [ 20 ] = KEY_KP5,
222 [ 52 ] = KEY_KP6,
223 [ 44 ] = KEY_KP7,
224 [ 28 ] = KEY_KP8,
225 [ 60 ] = KEY_KP9,
226 [ 34 ] = KEY_KP0,
227
228 [ 32 ] = KEY_TV, // TV/FM
229 [ 16 ] = KEY_CD, // CD
230 [ 48 ] = KEY_TEXT, // TELETEXT
231 [ 0 ] = KEY_POWER, // POWER
232
233 [ 8 ] = KEY_VIDEO, // VIDEO
234 [ 4 ] = KEY_AUDIO, // AUDIO
235 [ 12 ] = KEY_ZOOM, // FULL SCREEN
236
237 [ 18 ] = KEY_SUBTITLE, // DISPLAY - ???
238 [ 50 ] = KEY_REWIND, // LOOP - ???
239 [ 2 ] = KEY_PRINT, // PREVIEW - ???
240
241 [ 42 ] = KEY_SEARCH, // AUTOSCAN
242 [ 26 ] = KEY_SLEEP, // FREEZE - ???
243 [ 58 ] = KEY_SHUFFLE, // SNAPSHOT - ???
244 [ 10 ] = KEY_MUTE, // MUTE
245
246 [ 38 ] = KEY_RECORD, // RECORD
247 [ 22 ] = KEY_PAUSE, // PAUSE
248 [ 54 ] = KEY_STOP, // STOP
249 [ 6 ] = KEY_PLAY, // PLAY
250
251 [ 46 ] = KEY_RED, // <RED>
252 [ 33 ] = KEY_GREEN, // <GREEN>
253 [ 14 ] = KEY_YELLOW, // <YELLOW>
254 [ 1 ] = KEY_BLUE, // <BLUE>
255
256 [ 30 ] = KEY_VOLUMEDOWN, // VOLUME-
257 [ 62 ] = KEY_VOLUMEUP, // VOLUME+
258 [ 17 ] = KEY_CHANNELDOWN, // CHANNEL/PAGE-
259 [ 49 ] = KEY_CHANNELUP // CHANNEL/PAGE+
260};
261
262static IR_KEYTAB_TYPE videomate_tv_pvr_codes[IR_KEYTAB_SIZE] = {
263 [ 20 ] = KEY_MUTE,
264 [ 36 ] = KEY_ZOOM,
265
266 [ 1 ] = KEY_DVD,
267 [ 35 ] = KEY_RADIO,
268 [ 0 ] = KEY_TV,
269
270 [ 10 ] = KEY_REWIND,
271 [ 8 ] = KEY_PLAYPAUSE,
272 [ 15 ] = KEY_FORWARD,
273
274 [ 2 ] = KEY_PREVIOUS,
275 [ 7 ] = KEY_STOP,
276 [ 6 ] = KEY_NEXT,
277
278 [ 12 ] = KEY_UP,
279 [ 14 ] = KEY_DOWN,
280 [ 11 ] = KEY_LEFT,
281 [ 13 ] = KEY_RIGHT,
282 [ 17 ] = KEY_OK,
283
284 [ 3 ] = KEY_MENU,
285 [ 9 ] = KEY_SETUP,
286 [ 5 ] = KEY_VIDEO,
287 [ 34 ] = KEY_CHANNEL,
288
289 [ 18 ] = KEY_VOLUMEUP,
290 [ 21 ] = KEY_VOLUMEDOWN,
291 [ 16 ] = KEY_CHANNELUP,
292 [ 19 ] = KEY_CHANNELDOWN,
293
294 [ 4 ] = KEY_RECORD,
295
296 [ 22 ] = KEY_KP1,
297 [ 23 ] = KEY_KP2,
298 [ 24 ] = KEY_KP3,
299 [ 25 ] = KEY_KP4,
300 [ 26 ] = KEY_KP5,
301 [ 27 ] = KEY_KP6,
302 [ 28 ] = KEY_KP7,
303 [ 29 ] = KEY_KP8,
304 [ 30 ] = KEY_KP9,
305 [ 31 ] = KEY_KP0,
306
307 [ 32 ] = KEY_LANGUAGE,
308 [ 33 ] = KEY_SLEEP,
309};
310
311/* Michael Tokarev <mjt@tls.msk.ru>
312 http://www.corpit.ru/mjt/beholdTV/remote_control.jpg
313 keytable is used by MANLI MTV00[12] and BeholdTV 40[13] at
314 least, and probably other cards too.
315 The "ascii-art picture" below (in comments, first row
316 is the keycode in hex, and subsequent row(s) shows
317 the button labels (several variants when appropriate)
318 helps to decide which keycodes to assign to the buttons.
319 */
320static IR_KEYTAB_TYPE manli_codes[IR_KEYTAB_SIZE] = {
321
322 /* 0x1c 0x12 *
323 * FUNCTION POWER *
324 * FM (|) *
325 * */
326 [ 0x1c ] = KEY_RADIO, /*XXX*/
327 [ 0x12 ] = KEY_POWER,
328
329 /* 0x01 0x02 0x03 *
330 * 1 2 3 *
331 * *
332 * 0x04 0x05 0x06 *
333 * 4 5 6 *
334 * *
335 * 0x07 0x08 0x09 *
336 * 7 8 9 *
337 * */
338 [ 0x01 ] = KEY_KP1,
339 [ 0x02 ] = KEY_KP2,
340 [ 0x03 ] = KEY_KP3,
341 [ 0x04 ] = KEY_KP4,
342 [ 0x05 ] = KEY_KP5,
343 [ 0x06 ] = KEY_KP6,
344 [ 0x07 ] = KEY_KP7,
345 [ 0x08 ] = KEY_KP8,
346 [ 0x09 ] = KEY_KP9,
347
348 /* 0x0a 0x00 0x17 *
349 * RECALL 0 +100 *
350 * PLUS *
351 * */
352 [ 0x0a ] = KEY_AGAIN, /*XXX KEY_REWIND? */
353 [ 0x00 ] = KEY_KP0,
354 [ 0x17 ] = KEY_DIGITS, /*XXX*/
355
356 /* 0x14 0x10 *
357 * MENU INFO *
358 * OSD */
359 [ 0x14 ] = KEY_MENU,
360 [ 0x10 ] = KEY_INFO,
361
362 /* 0x0b *
363 * Up *
364 * *
365 * 0x18 0x16 0x0c *
366 * Left Ok Right *
367 * *
368 * 0x015 *
369 * Down *
370 * */
371 [ 0x0b ] = KEY_UP, /*XXX KEY_SCROLLUP? */
372 [ 0x18 ] = KEY_LEFT, /*XXX KEY_BACK? */
373 [ 0x16 ] = KEY_OK, /*XXX KEY_SELECT? KEY_ENTER? */
374 [ 0x0c ] = KEY_RIGHT, /*XXX KEY_FORWARD? */
375 [ 0x15 ] = KEY_DOWN, /*XXX KEY_SCROLLDOWN? */
376
377 /* 0x11 0x0d *
378 * TV/AV MODE *
379 * SOURCE STEREO *
380 * */
381 [ 0x11 ] = KEY_TV, /*XXX*/
382 [ 0x0d ] = KEY_MODE, /*XXX there's no KEY_STEREO */
383
384 /* 0x0f 0x1b 0x1a *
385 * AUDIO Vol+ Chan+ *
386 * TIMESHIFT??? *
387 * *
388 * 0x0e 0x1f 0x1e *
389 * SLEEP Vol- Chan- *
390 * */
391 [ 0x0f ] = KEY_AUDIO,
392 [ 0x1b ] = KEY_VOLUMEUP,
393 [ 0x1a ] = KEY_CHANNELUP,
394 [ 0x0e ] = KEY_SLEEP, /*XXX maybe KEY_PAUSE */
395 [ 0x1f ] = KEY_VOLUMEDOWN,
396 [ 0x1e ] = KEY_CHANNELDOWN,
397
398 /* 0x13 0x19 *
399 * MUTE SNAPSHOT*
400 * */
401 [ 0x13 ] = KEY_MUTE,
402 [ 0x19 ] = KEY_RECORD, /*XXX*/
403
404 // 0x1d unused ?
405};
406
407
408/* Mike Baikov <mike@baikov.com> */
409static IR_KEYTAB_TYPE gotview7135_codes[IR_KEYTAB_SIZE] = {
410
411 [ 33 ] = KEY_POWER,
412 [ 105] = KEY_TV,
413 [ 51 ] = KEY_KP0,
414 [ 81 ] = KEY_KP1,
415 [ 49 ] = KEY_KP2,
416 [ 113] = KEY_KP3,
417 [ 59 ] = KEY_KP4,
418 [ 88 ] = KEY_KP5,
419 [ 65 ] = KEY_KP6,
420 [ 72 ] = KEY_KP7,
421 [ 48 ] = KEY_KP8,
422 [ 83 ] = KEY_KP9,
423 [ 115] = KEY_AGAIN, /* LOOP */
424 [ 10 ] = KEY_AUDIO,
425 [ 97 ] = KEY_PRINT, /* PREVIEW */
426 [ 122] = KEY_VIDEO,
427 [ 32 ] = KEY_CHANNELUP,
428 [ 64 ] = KEY_CHANNELDOWN,
429 [ 24 ] = KEY_VOLUMEDOWN,
430 [ 80 ] = KEY_VOLUMEUP,
431 [ 16 ] = KEY_MUTE,
432 [ 74 ] = KEY_SEARCH,
433 [ 123] = KEY_SHUFFLE, /* SNAPSHOT */
434 [ 34 ] = KEY_RECORD,
435 [ 98 ] = KEY_STOP,
436 [ 120] = KEY_PLAY,
437 [ 57 ] = KEY_REWIND,
438 [ 89 ] = KEY_PAUSE,
439 [ 25 ] = KEY_FORWARD,
440 [ 9 ] = KEY_ZOOM,
441
442 [ 82 ] = KEY_F21, /* LIVE TIMESHIFT */
443 [ 26 ] = KEY_F22, /* MIN TIMESHIFT */
444 [ 58 ] = KEY_F23, /* TIMESHIFT */
445 [ 112] = KEY_F24, /* NORMAL TIMESHIFT */
446};
447
448static IR_KEYTAB_TYPE ir_codes_purpletv[IR_KEYTAB_SIZE] = {
449 [ 0x3 ] = KEY_POWER,
450 [ 0x6f ] = KEY_MUTE,
451 [ 0x10 ] = KEY_BACKSPACE, /* Recall */
452
453 [ 0x11 ] = KEY_KP0,
454 [ 0x4 ] = KEY_KP1,
455 [ 0x5 ] = KEY_KP2,
456 [ 0x6 ] = KEY_KP3,
457 [ 0x8 ] = KEY_KP4,
458 [ 0x9 ] = KEY_KP5,
459 [ 0xa ] = KEY_KP6,
460 [ 0xc ] = KEY_KP7,
461 [ 0xd ] = KEY_KP8,
462 [ 0xe ] = KEY_KP9,
463 [ 0x12 ] = KEY_KPDOT, /* 100+ */
464
465 [ 0x7 ] = KEY_VOLUMEUP,
466 [ 0xb ] = KEY_VOLUMEDOWN,
467 [ 0x1a ] = KEY_KPPLUS,
468 [ 0x18 ] = KEY_KPMINUS,
469 [ 0x15 ] = KEY_UP,
470 [ 0x1d ] = KEY_DOWN,
471 [ 0xf ] = KEY_CHANNELUP,
472 [ 0x13 ] = KEY_CHANNELDOWN,
473 [ 0x48 ] = KEY_ZOOM,
474
475 [ 0x1b ] = KEY_VIDEO, /* Video source */
476 [ 0x49 ] = KEY_LANGUAGE, /* MTS Select */
477 [ 0x19 ] = KEY_SEARCH, /* Auto Scan */
478
479 [ 0x4b ] = KEY_RECORD,
480 [ 0x46 ] = KEY_PLAY,
481 [ 0x45 ] = KEY_PAUSE, /* Pause */
482 [ 0x44 ] = KEY_STOP,
483 [ 0x40 ] = KEY_FORWARD, /* Forward ? */
484 [ 0x42 ] = KEY_REWIND, /* Backward ? */
485
486};
487
488/* Mapping for the 28 key remote control as seen at
489 http://www.sednacomputer.com/photo/cardbus-tv.jpg
490 Pavel Mihaylov <bin@bash.info> */
491static IR_KEYTAB_TYPE pctv_sedna_codes[IR_KEYTAB_SIZE] = {
492 [ 0 ] = KEY_KP0,
493 [ 1 ] = KEY_KP1,
494 [ 2 ] = KEY_KP2,
495 [ 3 ] = KEY_KP3,
496 [ 4 ] = KEY_KP4,
497 [ 5 ] = KEY_KP5,
498 [ 6 ] = KEY_KP6,
499 [ 7 ] = KEY_KP7,
500 [ 8 ] = KEY_KP8,
501 [ 9 ] = KEY_KP9,
502
503 [ 0x0a ] = KEY_AGAIN, /* Recall */
504 [ 0x0b ] = KEY_CHANNELUP,
505 [ 0x0c ] = KEY_VOLUMEUP,
506 [ 0x0d ] = KEY_MODE, /* Stereo */
507 [ 0x0e ] = KEY_STOP,
508 [ 0x0f ] = KEY_PREVIOUSSONG,
509 [ 0x10 ] = KEY_ZOOM,
510 [ 0x11 ] = KEY_TUNER, /* Source */
511 [ 0x12 ] = KEY_POWER,
512 [ 0x13 ] = KEY_MUTE,
513 [ 0x15 ] = KEY_CHANNELDOWN,
514 [ 0x18 ] = KEY_VOLUMEDOWN,
515 [ 0x19 ] = KEY_SHUFFLE, /* Snapshot */
516 [ 0x1a ] = KEY_NEXTSONG,
517 [ 0x1b ] = KEY_TEXT, /* Time Shift */
518 [ 0x1c ] = KEY_RADIO, /* FM Radio */
519 [ 0x1d ] = KEY_RECORD,
520 [ 0x1e ] = KEY_PAUSE,
521};
522
523
524/* -------------------- GPIO generic keycode builder -------------------- */ 45/* -------------------- GPIO generic keycode builder -------------------- */
525 46
526static int build_key(struct saa7134_dev *dev) 47static int build_key(struct saa7134_dev *dev)
@@ -628,27 +149,27 @@ int saa7134_input_init1(struct saa7134_dev *dev)
628 case SAA7134_BOARD_FLYVIDEO3000: 149 case SAA7134_BOARD_FLYVIDEO3000:
629 case SAA7134_BOARD_FLYTVPLATINUM_FM: 150 case SAA7134_BOARD_FLYTVPLATINUM_FM:
630 case SAA7134_BOARD_FLYTVPLATINUM_MINI2: 151 case SAA7134_BOARD_FLYTVPLATINUM_MINI2:
631 ir_codes = flyvideo_codes; 152 ir_codes = ir_codes_flyvideo;
632 mask_keycode = 0xEC00000; 153 mask_keycode = 0xEC00000;
633 mask_keydown = 0x0040000; 154 mask_keydown = 0x0040000;
634 break; 155 break;
635 case SAA7134_BOARD_CINERGY400: 156 case SAA7134_BOARD_CINERGY400:
636 case SAA7134_BOARD_CINERGY600: 157 case SAA7134_BOARD_CINERGY600:
637 case SAA7134_BOARD_CINERGY600_MK3: 158 case SAA7134_BOARD_CINERGY600_MK3:
638 ir_codes = cinergy_codes; 159 ir_codes = ir_codes_cinergy;
639 mask_keycode = 0x00003f; 160 mask_keycode = 0x00003f;
640 mask_keyup = 0x040000; 161 mask_keyup = 0x040000;
641 break; 162 break;
642 case SAA7134_BOARD_ECS_TVP3XP: 163 case SAA7134_BOARD_ECS_TVP3XP:
643 case SAA7134_BOARD_ECS_TVP3XP_4CB5: 164 case SAA7134_BOARD_ECS_TVP3XP_4CB5:
644 ir_codes = eztv_codes; 165 ir_codes = ir_codes_eztv;
645 mask_keycode = 0x00017c; 166 mask_keycode = 0x00017c;
646 mask_keyup = 0x000002; 167 mask_keyup = 0x000002;
647 polling = 50; // ms 168 polling = 50; // ms
648 break; 169 break;
649 case SAA7134_BOARD_KWORLD_XPERT: 170 case SAA7134_BOARD_KWORLD_XPERT:
650 case SAA7134_BOARD_AVACSSMARTTV: 171 case SAA7134_BOARD_AVACSSMARTTV:
651 ir_codes = avacssmart_codes; 172 ir_codes = ir_codes_pixelview;
652 mask_keycode = 0x00001F; 173 mask_keycode = 0x00001F;
653 mask_keyup = 0x000020; 174 mask_keyup = 0x000020;
654 polling = 50; // ms 175 polling = 50; // ms
@@ -660,7 +181,7 @@ int saa7134_input_init1(struct saa7134_dev *dev)
660 case SAA7134_BOARD_AVERMEDIA_STUDIO_305: 181 case SAA7134_BOARD_AVERMEDIA_STUDIO_305:
661 case SAA7134_BOARD_AVERMEDIA_STUDIO_307: 182 case SAA7134_BOARD_AVERMEDIA_STUDIO_307:
662 case SAA7134_BOARD_AVERMEDIA_GO_007_FM: 183 case SAA7134_BOARD_AVERMEDIA_GO_007_FM:
663 ir_codes = md2819_codes; 184 ir_codes = ir_codes_avermedia;
664 mask_keycode = 0x0007C8; 185 mask_keycode = 0x0007C8;
665 mask_keydown = 0x000010; 186 mask_keydown = 0x000010;
666 polling = 50; // ms 187 polling = 50; // ms
@@ -669,7 +190,7 @@ int saa7134_input_init1(struct saa7134_dev *dev)
669 saa_setb(SAA7134_GPIO_GPSTATUS0, 0x4); 190 saa_setb(SAA7134_GPIO_GPSTATUS0, 0x4);
670 break; 191 break;
671 case SAA7134_BOARD_KWORLD_TERMINATOR: 192 case SAA7134_BOARD_KWORLD_TERMINATOR:
672 ir_codes = avacssmart_codes; 193 ir_codes = ir_codes_pixelview;
673 mask_keycode = 0x00001f; 194 mask_keycode = 0x00001f;
674 mask_keyup = 0x000060; 195 mask_keyup = 0x000060;
675 polling = 50; // ms 196 polling = 50; // ms
@@ -677,19 +198,19 @@ int saa7134_input_init1(struct saa7134_dev *dev)
677 case SAA7134_BOARD_MANLI_MTV001: 198 case SAA7134_BOARD_MANLI_MTV001:
678 case SAA7134_BOARD_MANLI_MTV002: 199 case SAA7134_BOARD_MANLI_MTV002:
679 case SAA7134_BOARD_BEHOLD_409FM: 200 case SAA7134_BOARD_BEHOLD_409FM:
680 ir_codes = manli_codes; 201 ir_codes = ir_codes_manli;
681 mask_keycode = 0x001f00; 202 mask_keycode = 0x001f00;
682 mask_keyup = 0x004000; 203 mask_keyup = 0x004000;
683 polling = 50; // ms 204 polling = 50; // ms
684 break; 205 break;
685 case SAA7134_BOARD_SEDNA_PC_TV_CARDBUS: 206 case SAA7134_BOARD_SEDNA_PC_TV_CARDBUS:
686 ir_codes = pctv_sedna_codes; 207 ir_codes = ir_codes_pctv_sedna;
687 mask_keycode = 0x001f00; 208 mask_keycode = 0x001f00;
688 mask_keyup = 0x004000; 209 mask_keyup = 0x004000;
689 polling = 50; // ms 210 polling = 50; // ms
690 break; 211 break;
691 case SAA7134_BOARD_GOTVIEW_7135: 212 case SAA7134_BOARD_GOTVIEW_7135:
692 ir_codes = gotview7135_codes; 213 ir_codes = ir_codes_gotview7135;
693 mask_keycode = 0x0003EC; 214 mask_keycode = 0x0003EC;
694 mask_keyup = 0x008000; 215 mask_keyup = 0x008000;
695 mask_keydown = 0x000010; 216 mask_keydown = 0x000010;
@@ -698,17 +219,23 @@ int saa7134_input_init1(struct saa7134_dev *dev)
698 case SAA7134_BOARD_VIDEOMATE_TV_PVR: 219 case SAA7134_BOARD_VIDEOMATE_TV_PVR:
699 case SAA7134_BOARD_VIDEOMATE_GOLD_PLUS: 220 case SAA7134_BOARD_VIDEOMATE_GOLD_PLUS:
700 case SAA7134_BOARD_VIDEOMATE_TV_GOLD_PLUSII: 221 case SAA7134_BOARD_VIDEOMATE_TV_GOLD_PLUSII:
701 ir_codes = videomate_tv_pvr_codes; 222 ir_codes = ir_codes_videomate_tv_pvr;
702 mask_keycode = 0x00003F; 223 mask_keycode = 0x00003F;
703 mask_keyup = 0x400000; 224 mask_keyup = 0x400000;
704 polling = 50; // ms 225 polling = 50; // ms
705 break; 226 break;
706 case SAA7134_BOARD_VIDEOMATE_DVBT_300: 227 case SAA7134_BOARD_VIDEOMATE_DVBT_300:
707 case SAA7134_BOARD_VIDEOMATE_DVBT_200: 228 case SAA7134_BOARD_VIDEOMATE_DVBT_200:
708 ir_codes = videomate_tv_pvr_codes; 229 ir_codes = ir_codes_videomate_tv_pvr;
709 mask_keycode = 0x003F00; 230 mask_keycode = 0x003F00;
710 mask_keyup = 0x040000; 231 mask_keyup = 0x040000;
711 break; 232 break;
233 case SAA7134_BOARD_FLYDVBT_LR301:
234 case SAA7134_BOARD_FLYDVBTDUO:
235 ir_codes = ir_codes_flydvb;
236 mask_keycode = 0x0001F00;
237 mask_keydown = 0x0040000;
238 break;
712 } 239 }
713 if (NULL == ir_codes) { 240 if (NULL == ir_codes) {
714 printk("%s: Oops: IR config error [card=%d]\n", 241 printk("%s: Oops: IR config error [card=%d]\n",
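
For reference: each board case above only selects a keytable and the GPIO masks; turning those into input events is the job of the generic keycode builder. A minimal sketch of that step, assuming the keycode bits in mask_keycode are contiguous and using only the standard input API (the function and its name are hypothetical, not the driver's build_key()):

#include <linux/input.h>
#include <linux/bitops.h>
#include <media/ir-common.h>	/* IR_KEYTAB_TYPE */

/* Illustrative only -- not part of the patch. */
static void report_gpio_key(struct input_dev *input,
			    IR_KEYTAB_TYPE *ir_codes,
			    unsigned long gpio,
			    unsigned long mask_keycode,
			    unsigned long mask_keydown)
{
	/* isolate the keycode bits and shift them down to bit 0
	 * (assumes a contiguous mask; scattered masks need extra work) */
	unsigned long code = (gpio & mask_keycode) >> __ffs(mask_keycode);

	/* mask_keydown set means the button is currently pressed */
	input_report_key(input, ir_codes[code], (gpio & mask_keydown) != 0);
	input_sync(input);
}
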
diff --git a/drivers/media/video/saa7134/saa7134-oss.c b/drivers/media/video/saa7134/saa7134-oss.c
index 7448e386a804..d79d05f88705 100644
--- a/drivers/media/video/saa7134/saa7134-oss.c
+++ b/drivers/media/video/saa7134/saa7134-oss.c
@@ -84,8 +84,7 @@ static int dsp_buffer_init(struct saa7134_dev *dev)
84{ 84{
85 int err; 85 int err;
86 86
87 if (!dev->dmasound.bufsize) 87 BUG_ON(!dev->dmasound.bufsize);
88 BUG();
89 videobuf_dma_init(&dev->dmasound.dma); 88 videobuf_dma_init(&dev->dmasound.dma);
90 err = videobuf_dma_init_kernel(&dev->dmasound.dma, PCI_DMA_FROMDEVICE, 89 err = videobuf_dma_init_kernel(&dev->dmasound.dma, PCI_DMA_FROMDEVICE,
91 (dev->dmasound.bufsize + PAGE_SIZE) >> PAGE_SHIFT); 90 (dev->dmasound.bufsize + PAGE_SIZE) >> PAGE_SHIFT);
@@ -96,8 +95,7 @@ static int dsp_buffer_init(struct saa7134_dev *dev)
96 95
97static int dsp_buffer_free(struct saa7134_dev *dev) 96static int dsp_buffer_free(struct saa7134_dev *dev)
98{ 97{
99 if (!dev->dmasound.blksize) 98 BUG_ON(!dev->dmasound.blksize);
100 BUG();
101 videobuf_dma_free(&dev->dmasound.dma); 99 videobuf_dma_free(&dev->dmasound.dma);
102 dev->dmasound.blocks = 0; 100 dev->dmasound.blocks = 0;
103 dev->dmasound.blksize = 0; 101 dev->dmasound.blksize = 0;
@@ -254,7 +252,7 @@ static int dsp_open(struct inode *inode, struct file *file)
254 if (NULL == dev) 252 if (NULL == dev)
255 return -ENODEV; 253 return -ENODEV;
256 254
257 down(&dev->dmasound.lock); 255 mutex_lock(&dev->dmasound.lock);
258 err = -EBUSY; 256 err = -EBUSY;
259 if (dev->dmasound.users_dsp) 257 if (dev->dmasound.users_dsp)
260 goto fail1; 258 goto fail1;
@@ -270,13 +268,13 @@ static int dsp_open(struct inode *inode, struct file *file)
270 if (0 != err) 268 if (0 != err)
271 goto fail2; 269 goto fail2;
272 270
273 up(&dev->dmasound.lock); 271 mutex_unlock(&dev->dmasound.lock);
274 return 0; 272 return 0;
275 273
276 fail2: 274 fail2:
277 dev->dmasound.users_dsp--; 275 dev->dmasound.users_dsp--;
278 fail1: 276 fail1:
279 up(&dev->dmasound.lock); 277 mutex_unlock(&dev->dmasound.lock);
280 return err; 278 return err;
281} 279}
282 280
@@ -284,13 +282,13 @@ static int dsp_release(struct inode *inode, struct file *file)
284{ 282{
285 struct saa7134_dev *dev = file->private_data; 283 struct saa7134_dev *dev = file->private_data;
286 284
287 down(&dev->dmasound.lock); 285 mutex_lock(&dev->dmasound.lock);
288 if (dev->dmasound.recording_on) 286 if (dev->dmasound.recording_on)
289 dsp_rec_stop(dev); 287 dsp_rec_stop(dev);
290 dsp_buffer_free(dev); 288 dsp_buffer_free(dev);
291 dev->dmasound.users_dsp--; 289 dev->dmasound.users_dsp--;
292 file->private_data = NULL; 290 file->private_data = NULL;
293 up(&dev->dmasound.lock); 291 mutex_unlock(&dev->dmasound.lock);
294 return 0; 292 return 0;
295} 293}
296 294
@@ -304,7 +302,7 @@ static ssize_t dsp_read(struct file *file, char __user *buffer,
304 int err,ret = 0; 302 int err,ret = 0;
305 303
306 add_wait_queue(&dev->dmasound.wq, &wait); 304 add_wait_queue(&dev->dmasound.wq, &wait);
307 down(&dev->dmasound.lock); 305 mutex_lock(&dev->dmasound.lock);
308 while (count > 0) { 306 while (count > 0) {
309 /* wait for data if needed */ 307 /* wait for data if needed */
310 if (0 == dev->dmasound.read_count) { 308 if (0 == dev->dmasound.read_count) {
@@ -328,12 +326,12 @@ static ssize_t dsp_read(struct file *file, char __user *buffer,
328 ret = -EAGAIN; 326 ret = -EAGAIN;
329 break; 327 break;
330 } 328 }
331 up(&dev->dmasound.lock); 329 mutex_unlock(&dev->dmasound.lock);
332 set_current_state(TASK_INTERRUPTIBLE); 330 set_current_state(TASK_INTERRUPTIBLE);
333 if (0 == dev->dmasound.read_count) 331 if (0 == dev->dmasound.read_count)
334 schedule(); 332 schedule();
335 set_current_state(TASK_RUNNING); 333 set_current_state(TASK_RUNNING);
336 down(&dev->dmasound.lock); 334 mutex_lock(&dev->dmasound.lock);
337 if (signal_pending(current)) { 335 if (signal_pending(current)) {
338 if (0 == ret) 336 if (0 == ret)
339 ret = -EINTR; 337 ret = -EINTR;
@@ -362,7 +360,7 @@ static ssize_t dsp_read(struct file *file, char __user *buffer,
362 if (dev->dmasound.read_offset == dev->dmasound.bufsize) 360 if (dev->dmasound.read_offset == dev->dmasound.bufsize)
363 dev->dmasound.read_offset = 0; 361 dev->dmasound.read_offset = 0;
364 } 362 }
365 up(&dev->dmasound.lock); 363 mutex_unlock(&dev->dmasound.lock);
366 remove_wait_queue(&dev->dmasound.wq, &wait); 364 remove_wait_queue(&dev->dmasound.wq, &wait);
367 return ret; 365 return ret;
368} 366}
@@ -435,13 +433,13 @@ static int dsp_ioctl(struct inode *inode, struct file *file,
435 case SNDCTL_DSP_STEREO: 433 case SNDCTL_DSP_STEREO:
436 if (get_user(val, p)) 434 if (get_user(val, p))
437 return -EFAULT; 435 return -EFAULT;
438 down(&dev->dmasound.lock); 436 mutex_lock(&dev->dmasound.lock);
439 dev->dmasound.channels = val ? 2 : 1; 437 dev->dmasound.channels = val ? 2 : 1;
440 if (dev->dmasound.recording_on) { 438 if (dev->dmasound.recording_on) {
441 dsp_rec_stop(dev); 439 dsp_rec_stop(dev);
442 dsp_rec_start(dev); 440 dsp_rec_start(dev);
443 } 441 }
444 up(&dev->dmasound.lock); 442 mutex_unlock(&dev->dmasound.lock);
445 return put_user(dev->dmasound.channels-1, p); 443 return put_user(dev->dmasound.channels-1, p);
446 444
447 case SNDCTL_DSP_CHANNELS: 445 case SNDCTL_DSP_CHANNELS:
@@ -449,13 +447,13 @@ static int dsp_ioctl(struct inode *inode, struct file *file,
449 return -EFAULT; 447 return -EFAULT;
450 if (val != 1 && val != 2) 448 if (val != 1 && val != 2)
451 return -EINVAL; 449 return -EINVAL;
452 down(&dev->dmasound.lock); 450 mutex_lock(&dev->dmasound.lock);
453 dev->dmasound.channels = val; 451 dev->dmasound.channels = val;
454 if (dev->dmasound.recording_on) { 452 if (dev->dmasound.recording_on) {
455 dsp_rec_stop(dev); 453 dsp_rec_stop(dev);
456 dsp_rec_start(dev); 454 dsp_rec_start(dev);
457 } 455 }
458 up(&dev->dmasound.lock); 456 mutex_unlock(&dev->dmasound.lock);
459 /* fall through */ 457 /* fall through */
460 case SOUND_PCM_READ_CHANNELS: 458 case SOUND_PCM_READ_CHANNELS:
461 return put_user(dev->dmasound.channels, p); 459 return put_user(dev->dmasound.channels, p);
@@ -478,13 +476,13 @@ static int dsp_ioctl(struct inode *inode, struct file *file,
478 case AFMT_U16_BE: 476 case AFMT_U16_BE:
479 case AFMT_S16_LE: 477 case AFMT_S16_LE:
480 case AFMT_S16_BE: 478 case AFMT_S16_BE:
481 down(&dev->dmasound.lock); 479 mutex_lock(&dev->dmasound.lock);
482 dev->dmasound.afmt = val; 480 dev->dmasound.afmt = val;
483 if (dev->dmasound.recording_on) { 481 if (dev->dmasound.recording_on) {
484 dsp_rec_stop(dev); 482 dsp_rec_stop(dev);
485 dsp_rec_start(dev); 483 dsp_rec_start(dev);
486 } 484 }
487 up(&dev->dmasound.lock); 485 mutex_unlock(&dev->dmasound.lock);
488 return put_user(dev->dmasound.afmt, p); 486 return put_user(dev->dmasound.afmt, p);
489 default: 487 default:
490 return -EINVAL; 488 return -EINVAL;
@@ -509,10 +507,10 @@ static int dsp_ioctl(struct inode *inode, struct file *file,
509 return 0; 507 return 0;
510 508
511 case SNDCTL_DSP_RESET: 509 case SNDCTL_DSP_RESET:
512 down(&dev->dmasound.lock); 510 mutex_lock(&dev->dmasound.lock);
513 if (dev->dmasound.recording_on) 511 if (dev->dmasound.recording_on)
514 dsp_rec_stop(dev); 512 dsp_rec_stop(dev);
515 up(&dev->dmasound.lock); 513 mutex_unlock(&dev->dmasound.lock);
516 return 0; 514 return 0;
517 case SNDCTL_DSP_GETBLKSIZE: 515 case SNDCTL_DSP_GETBLKSIZE:
518 return put_user(dev->dmasound.blksize, p); 516 return put_user(dev->dmasound.blksize, p);
@@ -556,10 +554,10 @@ static unsigned int dsp_poll(struct file *file, struct poll_table_struct *wait)
556 poll_wait(file, &dev->dmasound.wq, wait); 554 poll_wait(file, &dev->dmasound.wq, wait);
557 555
558 if (0 == dev->dmasound.read_count) { 556 if (0 == dev->dmasound.read_count) {
559 down(&dev->dmasound.lock); 557 mutex_lock(&dev->dmasound.lock);
560 if (!dev->dmasound.recording_on) 558 if (!dev->dmasound.recording_on)
561 dsp_rec_start(dev); 559 dsp_rec_start(dev);
562 up(&dev->dmasound.lock); 560 mutex_unlock(&dev->dmasound.lock);
563 } else 561 } else
564 mask |= (POLLIN | POLLRDNORM); 562 mask |= (POLLIN | POLLRDNORM);
565 return mask; 563 return mask;
@@ -852,7 +850,7 @@ int saa7134_oss_init1(struct saa7134_dev *dev)
852 return -1; 850 return -1;
853 851
854 /* general */ 852 /* general */
855 init_MUTEX(&dev->dmasound.lock); 853 mutex_init(&dev->dmasound.lock);
856 init_waitqueue_head(&dev->dmasound.wq); 854 init_waitqueue_head(&dev->dmasound.wq);
857 855
858 switch (dev->pci->device) { 856 switch (dev->pci->device) {
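
For reference, the locking changes in this file (and in saa7134-video.c further down) all follow the same semaphore-to-mutex conversion. A minimal sketch of the before/after idiom, using a placeholder lock name:

#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);	/* replaces: struct semaphore + init_MUTEX() */

static void touch_shared_state(void)
{
	mutex_lock(&example_lock);	/* replaces: down(&example_lock); */
	/* ... access the dmasound/device state that the lock protects ... */
	mutex_unlock(&example_lock);	/* replaces: up(&example_lock); */
}
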
diff --git a/drivers/media/video/saa7134/saa7134-tvaudio.c b/drivers/media/video/saa7134/saa7134-tvaudio.c
index afa4dcb3f96d..3043233a8b6e 100644
--- a/drivers/media/video/saa7134/saa7134-tvaudio.c
+++ b/drivers/media/video/saa7134/saa7134-tvaudio.c
@@ -140,6 +140,12 @@ static struct saa7134_tvaudio tvaudio[] = {
140 .carr2 = 5850, 140 .carr2 = 5850,
141 .mode = TVAUDIO_NICAM_AM, 141 .mode = TVAUDIO_NICAM_AM,
142 },{ 142 },{
143 .name = "SECAM-L MONO",
144 .std = V4L2_STD_SECAM,
145 .carr1 = 6500,
146 .carr2 = -1,
147 .mode = TVAUDIO_AM_MONO,
148 },{
143 .name = "SECAM-D/K", 149 .name = "SECAM-D/K",
144 .std = V4L2_STD_SECAM, 150 .std = V4L2_STD_SECAM,
145 .carr1 = 6500, 151 .carr1 = 6500,
@@ -334,6 +340,12 @@ static void tvaudio_setmode(struct saa7134_dev *dev,
334 saa_writeb(SAA7134_STEREO_DAC_OUTPUT_SELECT, 0xa1); 340 saa_writeb(SAA7134_STEREO_DAC_OUTPUT_SELECT, 0xa1);
335 saa_writeb(SAA7134_NICAM_CONFIG, 0x00); 341 saa_writeb(SAA7134_NICAM_CONFIG, 0x00);
336 break; 342 break;
343 case TVAUDIO_AM_MONO:
344 saa_writeb(SAA7134_DEMODULATOR, 0x12);
345 saa_writeb(SAA7134_DCXO_IDENT_CTRL, 0x00);
346 saa_writeb(SAA7134_FM_DEEMPHASIS, 0x44);
347 saa_writeb(SAA7134_STEREO_DAC_OUTPUT_SELECT, 0xa0);
348 break;
337 case TVAUDIO_FM_SAT_STEREO: 349 case TVAUDIO_FM_SAT_STEREO:
338 /* not implemented (yet) */ 350 /* not implemented (yet) */
339 break; 351 break;
@@ -414,6 +426,7 @@ static int tvaudio_getstereo(struct saa7134_dev *dev, struct saa7134_tvaudio *au
414 426
415 switch (audio->mode) { 427 switch (audio->mode) {
416 case TVAUDIO_FM_MONO: 428 case TVAUDIO_FM_MONO:
429 case TVAUDIO_AM_MONO:
417 return V4L2_TUNER_SUB_MONO; 430 return V4L2_TUNER_SUB_MONO;
418 case TVAUDIO_FM_K_STEREO: 431 case TVAUDIO_FM_K_STEREO:
419 case TVAUDIO_FM_BG_STEREO: 432 case TVAUDIO_FM_BG_STEREO:
@@ -480,6 +493,7 @@ static int tvaudio_setstereo(struct saa7134_dev *dev, struct saa7134_tvaudio *au
480 493
481 switch (audio->mode) { 494 switch (audio->mode) {
482 case TVAUDIO_FM_MONO: 495 case TVAUDIO_FM_MONO:
496 case TVAUDIO_AM_MONO:
483 /* nothing to do ... */ 497 /* nothing to do ... */
484 break; 498 break;
485 case TVAUDIO_FM_K_STEREO: 499 case TVAUDIO_FM_K_STEREO:
diff --git a/drivers/media/video/saa7134/saa7134-video.c b/drivers/media/video/saa7134/saa7134-video.c
index e97426bc85df..57a11e71d996 100644
--- a/drivers/media/video/saa7134/saa7134-video.c
+++ b/drivers/media/video/saa7134/saa7134-video.c
@@ -460,17 +460,17 @@ static int res_get(struct saa7134_dev *dev, struct saa7134_fh *fh, unsigned int
460 return 1; 460 return 1;
461 461
462 /* is it free? */ 462 /* is it free? */
463 down(&dev->lock); 463 mutex_lock(&dev->lock);
464 if (dev->resources & bit) { 464 if (dev->resources & bit) {
465 /* no, someone else uses it */ 465 /* no, someone else uses it */
466 up(&dev->lock); 466 mutex_unlock(&dev->lock);
467 return 0; 467 return 0;
468 } 468 }
469 /* it's free, grab it */ 469 /* it's free, grab it */
470 fh->resources |= bit; 470 fh->resources |= bit;
471 dev->resources |= bit; 471 dev->resources |= bit;
472 dprintk("res: get %d\n",bit); 472 dprintk("res: get %d\n",bit);
473 up(&dev->lock); 473 mutex_unlock(&dev->lock);
474 return 1; 474 return 1;
475} 475}
476 476
@@ -489,14 +489,13 @@ int res_locked(struct saa7134_dev *dev, unsigned int bit)
489static 489static
490void res_free(struct saa7134_dev *dev, struct saa7134_fh *fh, unsigned int bits) 490void res_free(struct saa7134_dev *dev, struct saa7134_fh *fh, unsigned int bits)
491{ 491{
492 if ((fh->resources & bits) != bits) 492 BUG_ON((fh->resources & bits) != bits);
493 BUG();
494 493
495 down(&dev->lock); 494 mutex_lock(&dev->lock);
496 fh->resources &= ~bits; 495 fh->resources &= ~bits;
497 dev->resources &= ~bits; 496 dev->resources &= ~bits;
498 dprintk("res: put %d\n",bits); 497 dprintk("res: put %d\n",bits);
499 up(&dev->lock); 498 mutex_unlock(&dev->lock);
500} 499}
501 500
502/* ------------------------------------------------------------------ */ 501/* ------------------------------------------------------------------ */
@@ -1340,21 +1339,21 @@ video_poll(struct file *file, struct poll_table_struct *wait)
1340 if (!list_empty(&fh->cap.stream)) 1339 if (!list_empty(&fh->cap.stream))
1341 buf = list_entry(fh->cap.stream.next, struct videobuf_buffer, stream); 1340 buf = list_entry(fh->cap.stream.next, struct videobuf_buffer, stream);
1342 } else { 1341 } else {
1343 down(&fh->cap.lock); 1342 mutex_lock(&fh->cap.lock);
1344 if (UNSET == fh->cap.read_off) { 1343 if (UNSET == fh->cap.read_off) {
1345 /* need to capture a new frame */ 1344 /* need to capture a new frame */
1346 if (res_locked(fh->dev,RESOURCE_VIDEO)) { 1345 if (res_locked(fh->dev,RESOURCE_VIDEO)) {
1347 up(&fh->cap.lock); 1346 mutex_unlock(&fh->cap.lock);
1348 return POLLERR; 1347 return POLLERR;
1349 } 1348 }
1350 if (0 != fh->cap.ops->buf_prepare(&fh->cap,fh->cap.read_buf,fh->cap.field)) { 1349 if (0 != fh->cap.ops->buf_prepare(&fh->cap,fh->cap.read_buf,fh->cap.field)) {
1351 up(&fh->cap.lock); 1350 mutex_unlock(&fh->cap.lock);
1352 return POLLERR; 1351 return POLLERR;
1353 } 1352 }
1354 fh->cap.ops->buf_queue(&fh->cap,fh->cap.read_buf); 1353 fh->cap.ops->buf_queue(&fh->cap,fh->cap.read_buf);
1355 fh->cap.read_off = 0; 1354 fh->cap.read_off = 0;
1356 } 1355 }
1357 up(&fh->cap.lock); 1356 mutex_unlock(&fh->cap.lock);
1358 buf = fh->cap.read_buf; 1357 buf = fh->cap.read_buf;
1359 } 1358 }
1360 1359
@@ -1463,6 +1462,10 @@ static int saa7134_g_fmt(struct saa7134_dev *dev, struct saa7134_fh *fh,
1463 f->fmt.pix.height * f->fmt.pix.bytesperline; 1462 f->fmt.pix.height * f->fmt.pix.bytesperline;
1464 return 0; 1463 return 0;
1465 case V4L2_BUF_TYPE_VIDEO_OVERLAY: 1464 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
1465 if (saa7134_no_overlay > 0) {
1466 printk ("V4L2_BUF_TYPE_VIDEO_OVERLAY: no_overlay\n");
1467 return -EINVAL;
1468 }
1466 f->fmt.win = fh->win; 1469 f->fmt.win = fh->win;
1467 return 0; 1470 return 0;
1468 case V4L2_BUF_TYPE_VBI_CAPTURE: 1471 case V4L2_BUF_TYPE_VBI_CAPTURE:
@@ -1527,6 +1530,10 @@ static int saa7134_try_fmt(struct saa7134_dev *dev, struct saa7134_fh *fh,
1527 return 0; 1530 return 0;
1528 } 1531 }
1529 case V4L2_BUF_TYPE_VIDEO_OVERLAY: 1532 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
1533 if (saa7134_no_overlay > 0) {
1534 printk ("V4L2_BUF_TYPE_VIDEO_OVERLAY: no_overlay\n");
1535 return -EINVAL;
1536 }
1530 err = verify_preview(dev,&f->fmt.win); 1537 err = verify_preview(dev,&f->fmt.win);
1531 if (0 != err) 1538 if (0 != err)
1532 return err; 1539 return err;
@@ -1557,18 +1564,22 @@ static int saa7134_s_fmt(struct saa7134_dev *dev, struct saa7134_fh *fh,
1557 fh->cap.field = f->fmt.pix.field; 1564 fh->cap.field = f->fmt.pix.field;
1558 return 0; 1565 return 0;
1559 case V4L2_BUF_TYPE_VIDEO_OVERLAY: 1566 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
1567 if (saa7134_no_overlay > 0) {
1568 printk ("V4L2_BUF_TYPE_VIDEO_OVERLAY: no_overlay\n");
1569 return -EINVAL;
1570 }
1560 err = verify_preview(dev,&f->fmt.win); 1571 err = verify_preview(dev,&f->fmt.win);
1561 if (0 != err) 1572 if (0 != err)
1562 return err; 1573 return err;
1563 1574
1564 down(&dev->lock); 1575 mutex_lock(&dev->lock);
1565 fh->win = f->fmt.win; 1576 fh->win = f->fmt.win;
1566 fh->nclips = f->fmt.win.clipcount; 1577 fh->nclips = f->fmt.win.clipcount;
1567 if (fh->nclips > 8) 1578 if (fh->nclips > 8)
1568 fh->nclips = 8; 1579 fh->nclips = 8;
1569 if (copy_from_user(fh->clips,f->fmt.win.clips, 1580 if (copy_from_user(fh->clips,f->fmt.win.clips,
1570 sizeof(struct v4l2_clip)*fh->nclips)) { 1581 sizeof(struct v4l2_clip)*fh->nclips)) {
1571 up(&dev->lock); 1582 mutex_unlock(&dev->lock);
1572 return -EFAULT; 1583 return -EFAULT;
1573 } 1584 }
1574 1585
@@ -1578,7 +1589,7 @@ static int saa7134_s_fmt(struct saa7134_dev *dev, struct saa7134_fh *fh,
1578 start_preview(dev,fh); 1589 start_preview(dev,fh);
1579 spin_unlock_irqrestore(&dev->slock,flags); 1590 spin_unlock_irqrestore(&dev->slock,flags);
1580 } 1591 }
1581 up(&dev->lock); 1592 mutex_unlock(&dev->lock);
1582 return 0; 1593 return 0;
1583 case V4L2_BUF_TYPE_VBI_CAPTURE: 1594 case V4L2_BUF_TYPE_VBI_CAPTURE:
1584 saa7134_vbi_fmt(dev,f); 1595 saa7134_vbi_fmt(dev,f);
@@ -1612,9 +1623,9 @@ int saa7134_common_ioctl(struct saa7134_dev *dev,
1612 return get_control(dev,arg); 1623 return get_control(dev,arg);
1613 case VIDIOC_S_CTRL: 1624 case VIDIOC_S_CTRL:
1614 { 1625 {
1615 down(&dev->lock); 1626 mutex_lock(&dev->lock);
1616 err = set_control(dev,NULL,arg); 1627 err = set_control(dev,NULL,arg);
1617 up(&dev->lock); 1628 mutex_unlock(&dev->lock);
1618 return err; 1629 return err;
1619 } 1630 }
1620 /* --- input switching --------------------------------------- */ 1631 /* --- input switching --------------------------------------- */
@@ -1664,9 +1675,9 @@ int saa7134_common_ioctl(struct saa7134_dev *dev,
1664 return -EINVAL; 1675 return -EINVAL;
1665 if (NULL == card_in(dev,*i).name) 1676 if (NULL == card_in(dev,*i).name)
1666 return -EINVAL; 1677 return -EINVAL;
1667 down(&dev->lock); 1678 mutex_lock(&dev->lock);
1668 video_mux(dev,*i); 1679 video_mux(dev,*i);
1669 up(&dev->lock); 1680 mutex_unlock(&dev->lock);
1670 return 0; 1681 return 0;
1671 } 1682 }
1672 1683
@@ -1716,11 +1727,13 @@ static int video_do_ioctl(struct inode *inode, struct file *file,
1716 cap->version = SAA7134_VERSION_CODE; 1727 cap->version = SAA7134_VERSION_CODE;
1717 cap->capabilities = 1728 cap->capabilities =
1718 V4L2_CAP_VIDEO_CAPTURE | 1729 V4L2_CAP_VIDEO_CAPTURE |
1719 V4L2_CAP_VIDEO_OVERLAY |
1720 V4L2_CAP_VBI_CAPTURE | 1730 V4L2_CAP_VBI_CAPTURE |
1721 V4L2_CAP_READWRITE | 1731 V4L2_CAP_READWRITE |
1722 V4L2_CAP_STREAMING | 1732 V4L2_CAP_STREAMING |
1723 V4L2_CAP_TUNER; 1733 V4L2_CAP_TUNER;
1734 if (saa7134_no_overlay <= 0) {
1735 cap->capabilities |= V4L2_CAP_VIDEO_OVERLAY;
1736 }
1724 1737
1725 if ((tuner_type == TUNER_ABSENT) || (tuner_type == UNSET)) 1738 if ((tuner_type == TUNER_ABSENT) || (tuner_type == UNSET))
1726 cap->capabilities &= ~V4L2_CAP_TUNER; 1739 cap->capabilities &= ~V4L2_CAP_TUNER;
@@ -1766,7 +1779,7 @@ static int video_do_ioctl(struct inode *inode, struct file *file,
1766 if (i == TVNORMS) 1779 if (i == TVNORMS)
1767 return -EINVAL; 1780 return -EINVAL;
1768 1781
1769 down(&dev->lock); 1782 mutex_lock(&dev->lock);
1770 if (res_check(fh, RESOURCE_OVERLAY)) { 1783 if (res_check(fh, RESOURCE_OVERLAY)) {
1771 spin_lock_irqsave(&dev->slock,flags); 1784 spin_lock_irqsave(&dev->slock,flags);
1772 stop_preview(dev,fh); 1785 stop_preview(dev,fh);
@@ -1776,7 +1789,7 @@ static int video_do_ioctl(struct inode *inode, struct file *file,
1776 } else 1789 } else
1777 set_tvnorm(dev,&tvnorms[i]); 1790 set_tvnorm(dev,&tvnorms[i]);
1778 saa7134_tvaudio_do_scan(dev); 1791 saa7134_tvaudio_do_scan(dev);
1779 up(&dev->lock); 1792 mutex_unlock(&dev->lock);
1780 return 0; 1793 return 0;
1781 } 1794 }
1782 1795
@@ -1909,13 +1922,13 @@ static int video_do_ioctl(struct inode *inode, struct file *file,
1909 return -EINVAL; 1922 return -EINVAL;
1910 if (1 == fh->radio && V4L2_TUNER_RADIO != f->type) 1923 if (1 == fh->radio && V4L2_TUNER_RADIO != f->type)
1911 return -EINVAL; 1924 return -EINVAL;
1912 down(&dev->lock); 1925 mutex_lock(&dev->lock);
1913 dev->ctl_freq = f->frequency; 1926 dev->ctl_freq = f->frequency;
1914 1927
1915 saa7134_i2c_call_clients(dev,VIDIOC_S_FREQUENCY,f); 1928 saa7134_i2c_call_clients(dev,VIDIOC_S_FREQUENCY,f);
1916 1929
1917 saa7134_tvaudio_do_scan(dev); 1930 saa7134_tvaudio_do_scan(dev);
1918 up(&dev->lock); 1931 mutex_unlock(&dev->lock);
1919 return 0; 1932 return 0;
1920 } 1933 }
1921 1934
@@ -1971,6 +1984,10 @@ static int video_do_ioctl(struct inode *inode, struct file *file,
1971 switch (type) { 1984 switch (type) {
1972 case V4L2_BUF_TYPE_VIDEO_CAPTURE: 1985 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
1973 case V4L2_BUF_TYPE_VIDEO_OVERLAY: 1986 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
1987 if (saa7134_no_overlay > 0) {
1988 printk ("V4L2_BUF_TYPE_VIDEO_OVERLAY: no_overlay\n");
1989 return -EINVAL;
1990 }
1974 if (index >= FORMATS) 1991 if (index >= FORMATS)
1975 return -EINVAL; 1992 return -EINVAL;
1976 if (f->type == V4L2_BUF_TYPE_VIDEO_OVERLAY && 1993 if (f->type == V4L2_BUF_TYPE_VIDEO_OVERLAY &&
@@ -2031,6 +2048,11 @@ static int video_do_ioctl(struct inode *inode, struct file *file,
2031 int *on = arg; 2048 int *on = arg;
2032 2049
2033 if (*on) { 2050 if (*on) {
2051 if (saa7134_no_overlay > 0) {
2052 printk ("no_overlay\n");
2053 return -EINVAL;
2054 }
2055
2034 if (!res_get(dev,fh,RESOURCE_OVERLAY)) 2056 if (!res_get(dev,fh,RESOURCE_OVERLAY))
2035 return -EBUSY; 2057 return -EBUSY;
2036 spin_lock_irqsave(&dev->slock,flags); 2058 spin_lock_irqsave(&dev->slock,flags);
@@ -2282,7 +2304,7 @@ static struct file_operations radio_fops =
2282struct video_device saa7134_video_template = 2304struct video_device saa7134_video_template =
2283{ 2305{
2284 .name = "saa7134-video", 2306 .name = "saa7134-video",
2285 .type = VID_TYPE_CAPTURE|VID_TYPE_TUNER|VID_TYPE_OVERLAY| 2307 .type = VID_TYPE_CAPTURE|VID_TYPE_TUNER|
2286 VID_TYPE_CLIPPING|VID_TYPE_SCALES, 2308 VID_TYPE_CLIPPING|VID_TYPE_SCALES,
2287 .hardware = 0, 2309 .hardware = 0,
2288 .fops = &video_fops, 2310 .fops = &video_fops,
diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h
index 3261d8bebdd1..17ba34f30760 100644
--- a/drivers/media/video/saa7134/saa7134.h
+++ b/drivers/media/video/saa7134/saa7134.h
@@ -29,6 +29,7 @@
29#include <linux/input.h> 29#include <linux/input.h>
30#include <linux/notifier.h> 30#include <linux/notifier.h>
31#include <linux/delay.h> 31#include <linux/delay.h>
32#include <linux/mutex.h>
32 33
33#include <asm/io.h> 34#include <asm/io.h>
34 35
@@ -60,6 +61,7 @@ enum saa7134_tvaudio_mode {
60 TVAUDIO_FM_K_STEREO = 4, 61 TVAUDIO_FM_K_STEREO = 4,
61 TVAUDIO_NICAM_AM = 5, 62 TVAUDIO_NICAM_AM = 5,
62 TVAUDIO_NICAM_FM = 6, 63 TVAUDIO_NICAM_FM = 6,
64 TVAUDIO_AM_MONO = 7
63}; 65};
64 66
65enum saa7134_audio_in { 67enum saa7134_audio_in {
@@ -210,6 +212,15 @@ struct saa7134_format {
210#define SAA7134_BOARD_MSI_TVATANYWHERE_PLUS 82 212#define SAA7134_BOARD_MSI_TVATANYWHERE_PLUS 82
211#define SAA7134_BOARD_CINERGY250PCI 83 213#define SAA7134_BOARD_CINERGY250PCI 83
212#define SAA7134_BOARD_FLYDVB_TRIO 84 214#define SAA7134_BOARD_FLYDVB_TRIO 84
215#define SAA7134_BOARD_AVERMEDIA_777 85
216#define SAA7134_BOARD_FLYDVBT_LR301 86
217#define SAA7134_BOARD_ADS_DUO_CARDBUS_PTV331 87
218#define SAA7134_BOARD_TEVION_DVBT_220RF 88
219#define SAA7134_BOARD_ELSA_700TV 89
220#define SAA7134_BOARD_KWORLD_ATSC110 90
221#define SAA7134_BOARD_AVERMEDIA_A169_B 91
222#define SAA7134_BOARD_AVERMEDIA_A169_B1 92
223#define SAA7134_BOARD_MD7134_BRIDGE_2 93
213 224
214#define SAA7134_MAXBOARDS 8 225#define SAA7134_MAXBOARDS 8
215#define SAA7134_INPUT_MAX 8 226#define SAA7134_INPUT_MAX 8
@@ -359,7 +370,7 @@ struct saa7134_fh {
359 370
360/* dmasound dsp status */ 371/* dmasound dsp status */
361struct saa7134_dmasound { 372struct saa7134_dmasound {
362 struct semaphore lock; 373 struct mutex lock;
363 int minor_mixer; 374 int minor_mixer;
364 int minor_dsp; 375 int minor_dsp;
365 unsigned int users_dsp; 376 unsigned int users_dsp;
@@ -423,7 +434,7 @@ struct saa7134_mpeg_ops {
423/* global device status */ 434/* global device status */
424struct saa7134_dev { 435struct saa7134_dev {
425 struct list_head devlist; 436 struct list_head devlist;
426 struct semaphore lock; 437 struct mutex lock;
427 spinlock_t slock; 438 spinlock_t slock;
428#ifdef VIDIOC_G_PRIORITY 439#ifdef VIDIOC_G_PRIORITY
429 struct v4l2_prio_state prio; 440 struct v4l2_prio_state prio;
@@ -546,6 +557,7 @@ struct saa7134_dev {
546/* saa7134-core.c */ 557/* saa7134-core.c */
547 558
548extern struct list_head saa7134_devlist; 559extern struct list_head saa7134_devlist;
560extern int saa7134_no_overlay;
549 561
550void saa7134_track_gpio(struct saa7134_dev *dev, char *msg); 562void saa7134_track_gpio(struct saa7134_dev *dev, char *msg);
551 563
diff --git a/drivers/media/video/tda8290.c b/drivers/media/video/tda8290.c
index a796a4e1917c..027c8a074dfe 100644
--- a/drivers/media/video/tda8290.c
+++ b/drivers/media/video/tda8290.c
@@ -281,7 +281,7 @@ static void tda827xa_agcf(struct i2c_client *c)
281static void tda8290_i2c_bridge(struct i2c_client *c, int close) 281static void tda8290_i2c_bridge(struct i2c_client *c, int close)
282{ 282{
283 unsigned char enable[2] = { 0x21, 0xC0 }; 283 unsigned char enable[2] = { 0x21, 0xC0 };
284 unsigned char disable[2] = { 0x21, 0x80 }; 284 unsigned char disable[2] = { 0x21, 0x00 };
285 unsigned char *msg; 285 unsigned char *msg;
286 if(close) { 286 if(close) {
287 msg = enable; 287 msg = enable;
@@ -302,6 +302,7 @@ static int tda8290_tune(struct i2c_client *c, u16 ifc, unsigned int freq)
302 unsigned char soft_reset[] = { 0x00, 0x00 }; 302 unsigned char soft_reset[] = { 0x00, 0x00 };
303 unsigned char easy_mode[] = { 0x01, t->tda8290_easy_mode }; 303 unsigned char easy_mode[] = { 0x01, t->tda8290_easy_mode };
304 unsigned char expert_mode[] = { 0x01, 0x80 }; 304 unsigned char expert_mode[] = { 0x01, 0x80 };
305 unsigned char agc_out_on[] = { 0x02, 0x00 };
305 unsigned char gainset_off[] = { 0x28, 0x14 }; 306 unsigned char gainset_off[] = { 0x28, 0x14 };
306 unsigned char if_agc_spd[] = { 0x0f, 0x88 }; 307 unsigned char if_agc_spd[] = { 0x0f, 0x88 };
307 unsigned char adc_head_6[] = { 0x05, 0x04 }; 308 unsigned char adc_head_6[] = { 0x05, 0x04 };
@@ -320,6 +321,7 @@ static int tda8290_tune(struct i2c_client *c, u16 ifc, unsigned int freq)
320 pll_stat; 321 pll_stat;
321 322
322 i2c_master_send(c, easy_mode, 2); 323 i2c_master_send(c, easy_mode, 2);
324 i2c_master_send(c, agc_out_on, 2);
323 i2c_master_send(c, soft_reset, 2); 325 i2c_master_send(c, soft_reset, 2);
324 msleep(1); 326 msleep(1);
325 327
@@ -470,6 +472,7 @@ static void standby(struct i2c_client *c)
470 struct tuner *t = i2c_get_clientdata(c); 472 struct tuner *t = i2c_get_clientdata(c);
471 unsigned char cb1[] = { 0x30, 0xD0 }; 473 unsigned char cb1[] = { 0x30, 0xD0 };
472 unsigned char tda8290_standby[] = { 0x00, 0x02 }; 474 unsigned char tda8290_standby[] = { 0x00, 0x02 };
475 unsigned char tda8290_agc_tri[] = { 0x02, 0x20 };
473 struct i2c_msg msg = {.addr = t->tda827x_addr, .flags=0, .buf=cb1, .len = 2}; 476 struct i2c_msg msg = {.addr = t->tda827x_addr, .flags=0, .buf=cb1, .len = 2};
474 477
475 tda8290_i2c_bridge(c, 1); 478 tda8290_i2c_bridge(c, 1);
@@ -477,6 +480,7 @@ static void standby(struct i2c_client *c)
477 cb1[1] = 0x90; 480 cb1[1] = 0x90;
478 i2c_transfer(c->adapter, &msg, 1); 481 i2c_transfer(c->adapter, &msg, 1);
479 tda8290_i2c_bridge(c, 0); 482 tda8290_i2c_bridge(c, 0);
483 i2c_master_send(c, tda8290_agc_tri, 2);
480 i2c_master_send(c, tda8290_standby, 2); 484 i2c_master_send(c, tda8290_standby, 2);
481} 485}
482 486
@@ -565,7 +569,7 @@ int tda8290_init(struct i2c_client *c)
565 strlcpy(c->name, "tda8290+75a", sizeof(c->name)); 569 strlcpy(c->name, "tda8290+75a", sizeof(c->name));
566 t->tda827x_ver = 2; 570 t->tda827x_ver = 2;
567 } 571 }
568 tuner_info("tuner: type set to %s\n", c->name); 572 tuner_info("type set to %s\n", c->name);
569 573
570 t->set_tv_freq = set_tv_freq; 574 t->set_tv_freq = set_tv_freq;
571 t->set_radio_freq = set_radio_freq; 575 t->set_radio_freq = set_radio_freq;
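
For reference, the added agc_out_on and tda8290_agc_tri writes are plain two-byte {subaddress, value} transfers. A minimal sketch of such a write built on i2c_master_send() alone (the helper and its name are hypothetical, not part of the driver):

#include <linux/i2c.h>

static int tda8290_write_reg(struct i2c_client *c, u8 subaddr, u8 val)
{
	unsigned char buf[2] = { subaddr, val };

	/* i2c_master_send() returns the number of bytes written or a
	 * negative errno; treat anything other than 2 as an I/O error */
	return (i2c_master_send(c, buf, 2) == 2) ? 0 : -EIO;
}
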
diff --git a/drivers/media/video/tda9840.c b/drivers/media/video/tda9840.c
index ed4c04119ccc..0243700f58ae 100644
--- a/drivers/media/video/tda9840.c
+++ b/drivers/media/video/tda9840.c
@@ -24,6 +24,7 @@
24 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 24 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */ 25 */
26 26
27
27#include <linux/module.h> 28#include <linux/module.h>
28#include <linux/ioctl.h> 29#include <linux/ioctl.h>
29#include <linux/i2c.h> 30#include <linux/i2c.h>
@@ -222,7 +223,7 @@ static int detach(struct i2c_client *client)
222 223
223static struct i2c_driver driver = { 224static struct i2c_driver driver = {
224 .driver = { 225 .driver = {
225 .name = "tda9840", 226 .name = "tda9840",
226 }, 227 },
227 .id = I2C_DRIVERID_TDA9840, 228 .id = I2C_DRIVERID_TDA9840,
228 .attach_adapter = attach, 229 .attach_adapter = attach,
diff --git a/drivers/media/video/tea6415c.c b/drivers/media/video/tea6415c.c
index bb35844e3842..774ed0dbc56d 100644
--- a/drivers/media/video/tea6415c.c
+++ b/drivers/media/video/tea6415c.c
@@ -26,6 +26,7 @@
26 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 26 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */ 27 */
28 28
29
29#include <linux/module.h> 30#include <linux/module.h>
30#include <linux/ioctl.h> 31#include <linux/ioctl.h>
31#include <linux/i2c.h> 32#include <linux/i2c.h>
@@ -107,7 +108,7 @@ static int switch_matrix(struct i2c_client *client, int i, int o)
107{ 108{
108 u8 byte = 0; 109 u8 byte = 0;
109 int ret; 110 int ret;
110 111
111 dprintk("adr:0x%02x, i:%d, o:%d\n", client->addr, i, o); 112 dprintk("adr:0x%02x, i:%d, o:%d\n", client->addr, i, o);
112 113
113 /* check if the pins are valid */ 114 /* check if the pins are valid */
@@ -191,7 +192,7 @@ static int command(struct i2c_client *client, unsigned int cmd, void *arg)
191 192
192static struct i2c_driver driver = { 193static struct i2c_driver driver = {
193 .driver = { 194 .driver = {
194 .name = "tea6415c", 195 .name = "tea6415c",
195 }, 196 },
196 .id = I2C_DRIVERID_TEA6415C, 197 .id = I2C_DRIVERID_TEA6415C,
197 .attach_adapter = attach, 198 .attach_adapter = attach,
diff --git a/drivers/media/video/tea6420.c b/drivers/media/video/tea6420.c
index 4dcba5a4fff0..ad7d2872cfbf 100644
--- a/drivers/media/video/tea6420.c
+++ b/drivers/media/video/tea6420.c
@@ -26,6 +26,7 @@
26 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 26 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */ 27 */
28 28
29
29#include <linux/module.h> 30#include <linux/module.h>
30#include <linux/ioctl.h> 31#include <linux/ioctl.h>
31#include <linux/i2c.h> 32#include <linux/i2c.h>
@@ -83,7 +84,7 @@ static int tea6420_switch(struct i2c_client *client, int i, int o, int g)
83 dprintk("i2c_smbus_write_byte() failed, ret:%d\n", ret); 84 dprintk("i2c_smbus_write_byte() failed, ret:%d\n", ret);
84 return -EIO; 85 return -EIO;
85 } 86 }
86 87
87 return 0; 88 return 0;
88} 89}
89 90
@@ -167,7 +168,7 @@ static int command(struct i2c_client *client, unsigned int cmd, void *arg)
167 168
168static struct i2c_driver driver = { 169static struct i2c_driver driver = {
169 .driver = { 170 .driver = {
170 .name = "tea6420", 171 .name = "tea6420",
171 }, 172 },
172 .id = I2C_DRIVERID_TEA6420, 173 .id = I2C_DRIVERID_TEA6420,
173 .attach_adapter = attach, 174 .attach_adapter = attach,
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index b6101bf446d4..32e1849441fb 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -173,7 +173,6 @@ static void set_type(struct i2c_client *c, unsigned int type,
173 } 173 }
174 174
175 t->type = type; 175 t->type = type;
176
177 switch (t->type) { 176 switch (t->type) {
178 case TUNER_MT2032: 177 case TUNER_MT2032:
179 microtune_init(c); 178 microtune_init(c);
@@ -404,15 +403,16 @@ static void tuner_status(struct i2c_client *client)
404 tuner_info("Tuner mode: %s\n", p); 403 tuner_info("Tuner mode: %s\n", p);
405 tuner_info("Frequency: %lu.%02lu MHz\n", freq, freq_fraction); 404 tuner_info("Frequency: %lu.%02lu MHz\n", freq, freq_fraction);
406 tuner_info("Standard: 0x%08llx\n", t->std); 405 tuner_info("Standard: 0x%08llx\n", t->std);
407 if (t->mode == V4L2_TUNER_RADIO) { 406 if (t->mode != V4L2_TUNER_RADIO)
408 if (t->has_signal) { 407 return;
409 tuner_info("Signal strength: %d\n", t->has_signal(client)); 408 if (t->has_signal) {
410 } 409 tuner_info("Signal strength: %d\n", t->has_signal(client));
411 if (t->is_stereo) { 410 }
412 tuner_info("Stereo: %s\n", t->is_stereo(client) ? "yes" : "no"); 411 if (t->is_stereo) {
413 } 412 tuner_info("Stereo: %s\n", t->is_stereo(client) ? "yes" : "no");
414 } 413 }
415} 414}
415
416/* ---------------------------------------------------------------------- */ 416/* ---------------------------------------------------------------------- */
417 417
418/* static var Used only in tuner_attach and tuner_probe */ 418/* static var Used only in tuner_attach and tuner_probe */
@@ -744,33 +744,29 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
744 return 0; 744 return 0;
745 switch_v4l2(); 745 switch_v4l2();
746 746
747 if (V4L2_TUNER_RADIO == t->mode) { 747 tuner->type = t->mode;
748 748 if (t->mode != V4L2_TUNER_RADIO) {
749 if (t->has_signal)
750 tuner->signal = t->has_signal(client);
751
752 if (t->is_stereo) {
753 if (t->is_stereo(client)) {
754 tuner->rxsubchans =
755 V4L2_TUNER_SUB_STEREO |
756 V4L2_TUNER_SUB_MONO;
757 } else {
758 tuner->rxsubchans =
759 V4L2_TUNER_SUB_MONO;
760 }
761 }
762
763 tuner->capability |=
764 V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
765
766 tuner->audmode = t->audmode;
767
768 tuner->rangelow = radio_range[0] * 16000;
769 tuner->rangehigh = radio_range[1] * 16000;
770 } else {
771 tuner->rangelow = tv_range[0] * 16; 749 tuner->rangelow = tv_range[0] * 16;
772 tuner->rangehigh = tv_range[1] * 16; 750 tuner->rangehigh = tv_range[1] * 16;
751 break;
773 } 752 }
753
754 /* radio mode */
755 if (t->has_signal)
756 tuner->signal = t->has_signal(client);
757
758 tuner->rxsubchans =
759 V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
760 if (t->is_stereo) {
761 tuner->rxsubchans = t->is_stereo(client) ?
762 V4L2_TUNER_SUB_STEREO : V4L2_TUNER_SUB_MONO;
763 }
764
765 tuner->capability |=
766 V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
767 tuner->audmode = t->audmode;
768 tuner->rangelow = radio_range[0] * 16000;
769 tuner->rangehigh = radio_range[1] * 16000;
774 break; 770 break;
775 } 771 }
776 case VIDIOC_S_TUNER: 772 case VIDIOC_S_TUNER:
@@ -782,10 +778,11 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
782 778
783 switch_v4l2(); 779 switch_v4l2();
784 780
785 if (V4L2_TUNER_RADIO == t->mode) { 781 /* do nothing unless we're a radio tuner */
786 t->audmode = tuner->audmode; 782 if (t->mode != V4L2_TUNER_RADIO)
787 set_radio_freq(client, t->radio_freq); 783 break;
788 } 784 t->audmode = tuner->audmode;
785 set_radio_freq(client, t->radio_freq);
789 break; 786 break;
790 } 787 }
791 case VIDIOC_LOG_STATUS: 788 case VIDIOC_LOG_STATUS:
diff --git a/drivers/media/video/tuner-simple.c b/drivers/media/video/tuner-simple.c
index 37977ff49780..5d7abed71674 100644
--- a/drivers/media/video/tuner-simple.c
+++ b/drivers/media/video/tuner-simple.c
@@ -79,17 +79,6 @@ MODULE_PARM_DESC(offset,"Allows to specify an offset for tuner");
79#define TUNER_PLL_LOCKED 0x40 79#define TUNER_PLL_LOCKED 0x40
80#define TUNER_STEREO_MK3 0x04 80#define TUNER_STEREO_MK3 0x04
81 81
82#define TUNER_PARAM_ANALOG 0 /* to be removed */
83/* FIXME:
84 * Right now, all tuners are using the first tuner_params[] array element
85 * for analog mode. In the future, we will be merging similar tuner
86 * definitions together, such that each tuner definition will have a
87 * tuner_params struct for each available video standard. At that point,
88 * TUNER_PARAM_ANALOG will be removed, and the tuner_params[] array
89 * element will be chosen based on the video standard in use.
90 *
91 */
92
93/* ---------------------------------------------------------------------- */ 82/* ---------------------------------------------------------------------- */
94 83
95static int tuner_getstatus(struct i2c_client *c) 84static int tuner_getstatus(struct i2c_client *c)
@@ -133,14 +122,53 @@ static int tuner_stereo(struct i2c_client *c)
133static void default_set_tv_freq(struct i2c_client *c, unsigned int freq) 122static void default_set_tv_freq(struct i2c_client *c, unsigned int freq)
134{ 123{
135 struct tuner *t = i2c_get_clientdata(c); 124 struct tuner *t = i2c_get_clientdata(c);
136 u8 config, tuneraddr; 125 u8 config, cb, tuneraddr;
137 u16 div; 126 u16 div;
138 struct tunertype *tun; 127 struct tunertype *tun;
139 u8 buffer[4]; 128 u8 buffer[4];
140 int rc, IFPCoff, i, j; 129 int rc, IFPCoff, i, j;
130 enum param_type desired_type;
141 131
142 tun = &tuners[t->type]; 132 tun = &tuners[t->type];
143 j = TUNER_PARAM_ANALOG; 133
134 /* IFPCoff = Video Intermediate Frequency - Vif:
135 940 =16*58.75 NTSC/J (Japan)
136 732 =16*45.75 M/N STD
137 704 =16*44 ATSC (at DVB code)
138 632 =16*39.50 I U.K.
139 622.4=16*38.90 B/G D/K I, L STD
140 592 =16*37.00 D China
141 590 =16*36.875 B Australia
142 543.2=16*33.95 L' STD
143 171.2=16*10.70 FM Radio (at set_radio_freq)
144 */
145
146 if (t->std == V4L2_STD_NTSC_M_JP) {
147 IFPCoff = 940;
148 desired_type = TUNER_PARAM_TYPE_NTSC;
149 } else if ((t->std & V4L2_STD_MN) &&
150 !(t->std & ~V4L2_STD_MN)) {
151 IFPCoff = 732;
152 desired_type = TUNER_PARAM_TYPE_NTSC;
153 } else if (t->std == V4L2_STD_SECAM_LC) {
154 IFPCoff = 543;
155 desired_type = TUNER_PARAM_TYPE_SECAM;
156 } else {
157 IFPCoff = 623;
158 desired_type = TUNER_PARAM_TYPE_PAL;
159 }
160
161 for (j = 0; j < tun->count-1; j++) {
162 if (desired_type != tun->params[j].type)
163 continue;
164 break;
165 }
166 /* use default tuner_params if desired_type not available */
167 if (desired_type != tun->params[j].type) {
168 tuner_dbg("IFPCoff = %d: tuner_params undefined for tuner %d\n",
169 IFPCoff,t->type);
170 j = 0;
171 }
144 172
145 for (i = 0; i < tun->params[j].count; i++) { 173 for (i = 0; i < tun->params[j].count; i++) {
146 if (freq > tun->params[j].ranges[i].limit) 174 if (freq > tun->params[j].ranges[i].limit)
@@ -152,11 +180,20 @@ static void default_set_tv_freq(struct i2c_client *c, unsigned int freq)
152 freq, tun->params[j].ranges[i - 1].limit); 180 freq, tun->params[j].ranges[i - 1].limit);
153 freq = tun->params[j].ranges[--i].limit; 181 freq = tun->params[j].ranges[--i].limit;
154 } 182 }
155 config = tun->params[j].ranges[i].cb; 183 config = tun->params[j].ranges[i].config;
156 /* i == 0 -> VHF_LO */ 184 cb = tun->params[j].ranges[i].cb;
157 /* i == 1 -> VHF_HI */ 185 /* i == 0 -> VHF_LO
158 /* i == 2 -> UHF */ 186 * i == 1 -> VHF_HI
159 tuner_dbg("tv: range %d\n",i); 187 * i == 2 -> UHF */
188 tuner_dbg("tv: param %d, range %d\n",j,i);
189
190 div=freq + IFPCoff + offset;
191
192 tuner_dbg("Freq= %d.%02d MHz, V_IF=%d.%02d MHz, Offset=%d.%02d MHz, div=%0d\n",
193 freq / 16, freq % 16 * 100 / 16,
194 IFPCoff / 16, IFPCoff % 16 * 100 / 16,
195 offset / 16, offset % 16 * 100 / 16,
196 div);
160 197
161 /* tv norm specific stuff for multi-norm tuners */ 198 /* tv norm specific stuff for multi-norm tuners */
162 switch (t->type) { 199 switch (t->type) {
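
For reference, the divider computed above works in units of 1/16 MHz (16 == 1 MHz). A small standalone worked example for a PAL B/G channel, assuming a 503.25 MHz vision carrier and the default offset of 0:

#include <stdio.h>

int main(void)
{
	unsigned int freq    = 8052;	/* 503.25 MHz vision carrier * 16 */
	unsigned int IFPCoff = 623;	/* ~38.9 MHz IF for B/G, per the table above */
	unsigned int offset  = 0;	/* module parameter "offset", default 0 */
	unsigned int div     = freq + IFPCoff + offset;

	/* prints: div=8675 (LO = 542.18 MHz), i.e. carrier + IF */
	printf("div=%u (LO = %u.%02u MHz)\n", div, div / 16, div % 16 * 100 / 16);
	return 0;
}
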
@@ -164,40 +201,40 @@ static void default_set_tv_freq(struct i2c_client *c, unsigned int freq)
164 /* 0x01 -> ??? no change ??? */ 201 /* 0x01 -> ??? no change ??? */
165 /* 0x02 -> PAL BDGHI / SECAM L */ 202 /* 0x02 -> PAL BDGHI / SECAM L */
166 /* 0x04 -> ??? PAL others / SECAM others ??? */ 203 /* 0x04 -> ??? PAL others / SECAM others ??? */
167 config &= ~0x02; 204 cb &= ~0x02;
168 if (t->std & V4L2_STD_SECAM) 205 if (t->std & V4L2_STD_SECAM)
169 config |= 0x02; 206 cb |= 0x02;
170 break; 207 break;
171 208
172 case TUNER_TEMIC_4046FM5: 209 case TUNER_TEMIC_4046FM5:
173 config &= ~0x0f; 210 cb &= ~0x0f;
174 211
175 if (t->std & V4L2_STD_PAL_BG) { 212 if (t->std & V4L2_STD_PAL_BG) {
176 config |= TEMIC_SET_PAL_BG; 213 cb |= TEMIC_SET_PAL_BG;
177 214
178 } else if (t->std & V4L2_STD_PAL_I) { 215 } else if (t->std & V4L2_STD_PAL_I) {
179 config |= TEMIC_SET_PAL_I; 216 cb |= TEMIC_SET_PAL_I;
180 217
181 } else if (t->std & V4L2_STD_PAL_DK) { 218 } else if (t->std & V4L2_STD_PAL_DK) {
182 config |= TEMIC_SET_PAL_DK; 219 cb |= TEMIC_SET_PAL_DK;
183 220
184 } else if (t->std & V4L2_STD_SECAM_L) { 221 } else if (t->std & V4L2_STD_SECAM_L) {
185 config |= TEMIC_SET_PAL_L; 222 cb |= TEMIC_SET_PAL_L;
186 223
187 } 224 }
188 break; 225 break;
189 226
190 case TUNER_PHILIPS_FQ1216ME: 227 case TUNER_PHILIPS_FQ1216ME:
191 config &= ~0x0f; 228 cb &= ~0x0f;
192 229
193 if (t->std & (V4L2_STD_PAL_BG|V4L2_STD_PAL_DK)) { 230 if (t->std & (V4L2_STD_PAL_BG|V4L2_STD_PAL_DK)) {
194 config |= PHILIPS_SET_PAL_BGDK; 231 cb |= PHILIPS_SET_PAL_BGDK;
195 232
196 } else if (t->std & V4L2_STD_PAL_I) { 233 } else if (t->std & V4L2_STD_PAL_I) {
197 config |= PHILIPS_SET_PAL_I; 234 cb |= PHILIPS_SET_PAL_I;
198 235
199 } else if (t->std & V4L2_STD_SECAM_L) { 236 } else if (t->std & V4L2_STD_SECAM_L) {
200 config |= PHILIPS_SET_PAL_L; 237 cb |= PHILIPS_SET_PAL_L;
201 238
202 } 239 }
203 break; 240 break;
@@ -207,15 +244,15 @@ static void default_set_tv_freq(struct i2c_client *c, unsigned int freq)
207 /* 0x01 -> ATSC antenna input 2 */ 244 /* 0x01 -> ATSC antenna input 2 */
208 /* 0x02 -> NTSC antenna input 1 */ 245 /* 0x02 -> NTSC antenna input 1 */
209 /* 0x03 -> NTSC antenna input 2 */ 246 /* 0x03 -> NTSC antenna input 2 */
210 config &= ~0x03; 247 cb &= ~0x03;
211 if (!(t->std & V4L2_STD_ATSC)) 248 if (!(t->std & V4L2_STD_ATSC))
212 config |= 2; 249 cb |= 2;
213 /* FIXME: input */ 250 /* FIXME: input */
214 break; 251 break;
215 252
216 case TUNER_MICROTUNE_4042FI5: 253 case TUNER_MICROTUNE_4042FI5:
217 /* Set the charge pump for fast tuning */ 254 /* Set the charge pump for fast tuning */
218 tun->params[j].config |= TUNER_CHARGE_PUMP; 255 config |= TUNER_CHARGE_PUMP;
219 break; 256 break;
220 257
221 case TUNER_PHILIPS_TUV1236D: 258 case TUNER_PHILIPS_TUV1236D:
@@ -227,9 +264,9 @@ static void default_set_tv_freq(struct i2c_client *c, unsigned int freq)
227 buffer[1] = 0x00; 264 buffer[1] = 0x00;
228 buffer[2] = 0x17; 265 buffer[2] = 0x17;
229 buffer[3] = 0x00; 266 buffer[3] = 0x00;
230 config &= ~0x40; 267 cb &= ~0x40;
231 if (t->std & V4L2_STD_ATSC) { 268 if (t->std & V4L2_STD_ATSC) {
232 config |= 0x40; 269 cb |= 0x40;
233 buffer[1] = 0x04; 270 buffer[1] = 0x04;
234 } 271 }
235 /* set to the correct mode (analog or digital) */ 272 /* set to the correct mode (analog or digital) */
@@ -244,47 +281,16 @@ static void default_set_tv_freq(struct i2c_client *c, unsigned int freq)
244 break; 281 break;
245 } 282 }
246 283
247 /* IFPCoff = Video Intermediate Frequency - Vif:
248 940 =16*58.75 NTSC/J (Japan)
249 732 =16*45.75 M/N STD
250 704 =16*44 ATSC (at DVB code)
251 632 =16*39.50 I U.K.
252 622.4=16*38.90 B/G D/K I, L STD
253 592 =16*37.00 D China
 254 590 =16*36.875 B Australia
255 543.2=16*33.95 L' STD
256 171.2=16*10.70 FM Radio (at set_radio_freq)
257 */
258
259 if (t->std == V4L2_STD_NTSC_M_JP) {
260 IFPCoff = 940;
261 } else if ((t->std & V4L2_STD_MN) &&
262 !(t->std & ~V4L2_STD_MN)) {
263 IFPCoff = 732;
264 } else if (t->std == V4L2_STD_SECAM_LC) {
265 IFPCoff = 543;
266 } else {
267 IFPCoff = 623;
268 }
269
270 div=freq + IFPCoff + offset;
271
272 tuner_dbg("Freq= %d.%02d MHz, V_IF=%d.%02d MHz, Offset=%d.%02d MHz, div=%0d\n",
273 freq / 16, freq % 16 * 100 / 16,
274 IFPCoff / 16, IFPCoff % 16 * 100 / 16,
275 offset / 16, offset % 16 * 100 / 16,
276 div);
277
278 if (tuners[t->type].params->cb_first_if_lower_freq && div < t->last_div) { 284 if (tuners[t->type].params->cb_first_if_lower_freq && div < t->last_div) {
279 buffer[0] = tun->params[j].config; 285 buffer[0] = config;
280 buffer[1] = config; 286 buffer[1] = cb;
281 buffer[2] = (div>>8) & 0x7f; 287 buffer[2] = (div>>8) & 0x7f;
282 buffer[3] = div & 0xff; 288 buffer[3] = div & 0xff;
283 } else { 289 } else {
284 buffer[0] = (div>>8) & 0x7f; 290 buffer[0] = (div>>8) & 0x7f;
285 buffer[1] = div & 0xff; 291 buffer[1] = div & 0xff;
286 buffer[2] = tun->params[j].config; 292 buffer[2] = config;
287 buffer[3] = config; 293 buffer[3] = cb;
288 } 294 }
289 t->last_div = div; 295 t->last_div = div;
290 tuner_dbg("tv 0x%02x 0x%02x 0x%02x 0x%02x\n", 296 tuner_dbg("tv 0x%02x 0x%02x 0x%02x 0x%02x\n",
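Whether the divider bytes or the control/bandswitch bytes are written first depends on the tuner's cb_first_if_lower_freq flag, and only when the new divider is lower than the previously programmed one. A sketch of the two byte orders (the helper name is illustrative, not from the driver):

	/* Sketch: layout of the 4-byte I2C write.  'config' is the control
	 * byte, 'cb' the bandswitch byte, 'div' the 15-bit divider. */
	static void sketch_fill_buffer(unsigned char buf[4], unsigned int div,
				       unsigned char config, unsigned char cb,
				       int cb_first)
	{
		if (cb_first) {
			buf[0] = config;
			buf[1] = cb;
			buf[2] = (div >> 8) & 0x7f;
			buf[3] = div & 0xff;
		} else {
			buf[0] = (div >> 8) & 0x7f;
			buf[1] = div & 0xff;
			buf[2] = config;
			buf[3] = cb;
		}
	}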
@@ -312,11 +318,11 @@ static void default_set_tv_freq(struct i2c_client *c, unsigned int freq)
312 } 318 }
313 319
314 /* Set the charge pump for optimized phase noise figure */ 320 /* Set the charge pump for optimized phase noise figure */
315 tun->params[j].config &= ~TUNER_CHARGE_PUMP; 321 config &= ~TUNER_CHARGE_PUMP;
316 buffer[0] = (div>>8) & 0x7f; 322 buffer[0] = (div>>8) & 0x7f;
317 buffer[1] = div & 0xff; 323 buffer[1] = div & 0xff;
318 buffer[2] = tun->params[j].config; 324 buffer[2] = config;
319 buffer[3] = config; 325 buffer[3] = cb;
320 tuner_dbg("tv 0x%02x 0x%02x 0x%02x 0x%02x\n", 326 tuner_dbg("tv 0x%02x 0x%02x 0x%02x 0x%02x\n",
321 buffer[0],buffer[1],buffer[2],buffer[3]); 327 buffer[0],buffer[1],buffer[2],buffer[3]);
322 328
@@ -332,12 +338,21 @@ static void default_set_radio_freq(struct i2c_client *c, unsigned int freq)
332 u8 buffer[4]; 338 u8 buffer[4];
333 u16 div; 339 u16 div;
334 int rc, j; 340 int rc, j;
341 enum param_type desired_type = TUNER_PARAM_TYPE_RADIO;
335 342
336 tun = &tuners[t->type]; 343 tun = &tuners[t->type];
337 j = TUNER_PARAM_ANALOG; 344
345 for (j = 0; j < tun->count-1; j++) {
346 if (desired_type != tun->params[j].type)
347 continue;
348 break;
349 }
350 /* use default tuner_params if desired_type not available */
351 if (desired_type != tun->params[j].type)
352 j = 0;
338 353
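The radio path now walks the tuner_params array looking for a TUNER_PARAM_TYPE_RADIO entry and falls back to entry 0 when the tuner defines none. The same lookup written as a standalone sketch (the helper name is invented for illustration):

	/* Sketch: index of the tuner_params entry matching 'desired',
	 * defaulting to the first entry when no match exists. */
	static int sketch_find_params(const struct tuner_params *params, int count,
				      enum param_type desired)
	{
		int j;

		for (j = 0; j < count - 1; j++)
			if (params[j].type == desired)
				break;
		if (params[j].type != desired)
			j = 0;	/* use default tuner_params */
		return j;
	}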
339 div = (20 * freq / 16000) + (int)(20*10.7); /* IF 10.7 MHz */ 354 div = (20 * freq / 16000) + (int)(20*10.7); /* IF 10.7 MHz */
340 buffer[2] = (tun->params[j].config & ~TUNER_RATIO_MASK) | TUNER_RATIO_SELECT_50; /* 50 kHz step */ 355 buffer[2] = (tun->params[j].ranges[0].config & ~TUNER_RATIO_MASK) | TUNER_RATIO_SELECT_50; /* 50 kHz step */
341 356
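For FM radio the requested frequency is in 1/16000 MHz (62.5 Hz) steps and the PLL is programmed in 50 kHz steps against a 10.7 MHz IF (33.3 MHz for the FM1256-IH3 case below), so the divider is roughly (f + f_IF) / 50 kHz. A sketch with the IF folded in as the intended integer constant (the driver itself uses the floating-point expression (int)(20*10.7)):

	/* Sketch: FM radio PLL divider in 50 kHz steps, 10.7 MHz IF assumed.
	 * freq is taken to be in 1/16000 MHz (62.5 Hz) units. */
	static unsigned int sketch_radio_div(unsigned int freq)
	{
		return (20 * freq / 16000) + 214;	/* 214 ~= 10.7 MHz / 50 kHz */
	}
	/* Example: 100.0 MHz -> freq = 1600000, div = 2000 + 214 = 2214,
	 * i.e. roughly (100.0 MHz + 10.7 MHz) / 50 kHz. */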
342 switch (t->type) { 357 switch (t->type) {
343 case TUNER_TENA_9533_DI: 358 case TUNER_TENA_9533_DI:
@@ -349,6 +364,9 @@ static void default_set_radio_freq(struct i2c_client *c, unsigned int freq)
349 case TUNER_PHILIPS_FMD1216ME_MK3: 364 case TUNER_PHILIPS_FMD1216ME_MK3:
350 buffer[3] = 0x19; 365 buffer[3] = 0x19;
351 break; 366 break;
367 case TUNER_TNF_5335MF:
368 buffer[3] = 0x11;
369 break;
352 case TUNER_PHILIPS_FM1256_IH3: 370 case TUNER_PHILIPS_FM1256_IH3:
353 div = (20 * freq) / 16000 + (int)(33.3 * 20); /* IF 33.3 MHz */ 371 div = (20 * freq) / 16000 + (int)(33.3 * 20); /* IF 33.3 MHz */
354 buffer[3] = 0x19; 372 buffer[3] = 0x19;
diff --git a/drivers/media/video/tuner-types.c b/drivers/media/video/tuner-types.c
index 6fe781798d89..72e0f01db563 100644
--- a/drivers/media/video/tuner-types.c
+++ b/drivers/media/video/tuner-types.c
@@ -23,22 +23,25 @@
23 * Each tuner_params array may contain one or more elements, one 23 * Each tuner_params array may contain one or more elements, one
24 * for each video standard. 24 * for each video standard.
25 * 25 *
26 * FIXME: Some tuner_range definitions are duplicated, and 26 * FIXME: tuner_params struct contains an element, tda988x. We must
27 * should be eliminated. 27 * set this for all tuners that contain a tda988x chip, and then we
28 * can remove this setting from the various card structs.
28 * 29 *
29 * FIXME: tunertype struct contains an element, has_tda988x. 30 * FIXME: Right now, all tuners are using the first tuner_params[]
30 * We must set this for all tunertypes that contain a tda988x 31 * array element for analog mode. In the future, we will be merging
31 * chip, and then we can remove this setting from the various 32 * similar tuner definitions together, such that each tuner definition
32 * card structs. 33 * will have a tuner_params struct for each available video standard.
34 * At that point, the tuner_params[] array element will be chosen
35 * based on the video standard in use.
33 */ 36 */
34 37
35/* 0-9 */ 38/* 0-9 */
36/* ------------ TUNER_TEMIC_PAL - TEMIC PAL ------------ */ 39/* ------------ TUNER_TEMIC_PAL - TEMIC PAL ------------ */
37 40
38static struct tuner_range tuner_temic_pal_ranges[] = { 41static struct tuner_range tuner_temic_pal_ranges[] = {
39 { 16 * 140.25 /*MHz*/, 0x02, }, 42 { 16 * 140.25 /*MHz*/, 0x8e, 0x02, },
40 { 16 * 463.25 /*MHz*/, 0x04, }, 43 { 16 * 463.25 /*MHz*/, 0x8e, 0x04, },
41 { 16 * 999.99 , 0x01, }, 44 { 16 * 999.99 , 0x8e, 0x01, },
42}; 45};
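Throughout tuner-types.c each range initializer gains a middle value: the control byte that used to be a single per-tuner .config field in tuner_params (0x8e for most tuners, 0xce or 0x86 for a few) now lives in the range entry next to the bandswitch byte. The layout these initializers imply is roughly the following; the field names are inferred from the code here, and the tuner header remains authoritative:

	/* Assumed shape after this patch (sketch, not quoted from the header). */
	struct tuner_range {
		unsigned short limit;	/* upper band limit, in 1/16 MHz  */
		unsigned char config;	/* control byte, e.g. 0x8e        */
		unsigned char cb;	/* bandswitch byte for this range */
	};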
43 46
44static struct tuner_params tuner_temic_pal_params[] = { 47static struct tuner_params tuner_temic_pal_params[] = {
@@ -46,16 +49,15 @@ static struct tuner_params tuner_temic_pal_params[] = {
46 .type = TUNER_PARAM_TYPE_PAL, 49 .type = TUNER_PARAM_TYPE_PAL,
47 .ranges = tuner_temic_pal_ranges, 50 .ranges = tuner_temic_pal_ranges,
48 .count = ARRAY_SIZE(tuner_temic_pal_ranges), 51 .count = ARRAY_SIZE(tuner_temic_pal_ranges),
49 .config = 0x8e,
50 }, 52 },
51}; 53};
52 54
53/* ------------ TUNER_PHILIPS_PAL_I - Philips PAL_I ------------ */ 55/* ------------ TUNER_PHILIPS_PAL_I - Philips PAL_I ------------ */
54 56
55static struct tuner_range tuner_philips_pal_i_ranges[] = { 57static struct tuner_range tuner_philips_pal_i_ranges[] = {
56 { 16 * 140.25 /*MHz*/, 0xa0, }, 58 { 16 * 140.25 /*MHz*/, 0x8e, 0xa0, },
57 { 16 * 463.25 /*MHz*/, 0x90, }, 59 { 16 * 463.25 /*MHz*/, 0x8e, 0x90, },
58 { 16 * 999.99 , 0x30, }, 60 { 16 * 999.99 , 0x8e, 0x30, },
59}; 61};
60 62
61static struct tuner_params tuner_philips_pal_i_params[] = { 63static struct tuner_params tuner_philips_pal_i_params[] = {
@@ -63,16 +65,15 @@ static struct tuner_params tuner_philips_pal_i_params[] = {
63 .type = TUNER_PARAM_TYPE_PAL, 65 .type = TUNER_PARAM_TYPE_PAL,
64 .ranges = tuner_philips_pal_i_ranges, 66 .ranges = tuner_philips_pal_i_ranges,
65 .count = ARRAY_SIZE(tuner_philips_pal_i_ranges), 67 .count = ARRAY_SIZE(tuner_philips_pal_i_ranges),
66 .config = 0x8e,
67 }, 68 },
68}; 69};
69 70
70/* ------------ TUNER_PHILIPS_NTSC - Philips NTSC ------------ */ 71/* ------------ TUNER_PHILIPS_NTSC - Philips NTSC ------------ */
71 72
72static struct tuner_range tuner_philips_ntsc_ranges[] = { 73static struct tuner_range tuner_philips_ntsc_ranges[] = {
73 { 16 * 157.25 /*MHz*/, 0xa0, }, 74 { 16 * 157.25 /*MHz*/, 0x8e, 0xa0, },
74 { 16 * 451.25 /*MHz*/, 0x90, }, 75 { 16 * 451.25 /*MHz*/, 0x8e, 0x90, },
75 { 16 * 999.99 , 0x30, }, 76 { 16 * 999.99 , 0x8e, 0x30, },
76}; 77};
77 78
78static struct tuner_params tuner_philips_ntsc_params[] = { 79static struct tuner_params tuner_philips_ntsc_params[] = {
@@ -80,7 +81,6 @@ static struct tuner_params tuner_philips_ntsc_params[] = {
80 .type = TUNER_PARAM_TYPE_NTSC, 81 .type = TUNER_PARAM_TYPE_NTSC,
81 .ranges = tuner_philips_ntsc_ranges, 82 .ranges = tuner_philips_ntsc_ranges,
82 .count = ARRAY_SIZE(tuner_philips_ntsc_ranges), 83 .count = ARRAY_SIZE(tuner_philips_ntsc_ranges),
83 .config = 0x8e,
84 .cb_first_if_lower_freq = 1, 84 .cb_first_if_lower_freq = 1,
85 }, 85 },
86}; 86};
@@ -88,9 +88,9 @@ static struct tuner_params tuner_philips_ntsc_params[] = {
88/* ------------ TUNER_PHILIPS_SECAM - Philips SECAM ------------ */ 88/* ------------ TUNER_PHILIPS_SECAM - Philips SECAM ------------ */
89 89
90static struct tuner_range tuner_philips_secam_ranges[] = { 90static struct tuner_range tuner_philips_secam_ranges[] = {
91 { 16 * 168.25 /*MHz*/, 0xa7, }, 91 { 16 * 168.25 /*MHz*/, 0x8e, 0xa7, },
92 { 16 * 447.25 /*MHz*/, 0x97, }, 92 { 16 * 447.25 /*MHz*/, 0x8e, 0x97, },
93 { 16 * 999.99 , 0x37, }, 93 { 16 * 999.99 , 0x8e, 0x37, },
94}; 94};
95 95
96static struct tuner_params tuner_philips_secam_params[] = { 96static struct tuner_params tuner_philips_secam_params[] = {
@@ -98,7 +98,6 @@ static struct tuner_params tuner_philips_secam_params[] = {
98 .type = TUNER_PARAM_TYPE_SECAM, 98 .type = TUNER_PARAM_TYPE_SECAM,
99 .ranges = tuner_philips_secam_ranges, 99 .ranges = tuner_philips_secam_ranges,
100 .count = ARRAY_SIZE(tuner_philips_secam_ranges), 100 .count = ARRAY_SIZE(tuner_philips_secam_ranges),
101 .config = 0x8e,
102 .cb_first_if_lower_freq = 1, 101 .cb_first_if_lower_freq = 1,
103 }, 102 },
104}; 103};
@@ -106,9 +105,9 @@ static struct tuner_params tuner_philips_secam_params[] = {
106/* ------------ TUNER_PHILIPS_PAL - Philips PAL ------------ */ 105/* ------------ TUNER_PHILIPS_PAL - Philips PAL ------------ */
107 106
108static struct tuner_range tuner_philips_pal_ranges[] = { 107static struct tuner_range tuner_philips_pal_ranges[] = {
109 { 16 * 168.25 /*MHz*/, 0xa0, }, 108 { 16 * 168.25 /*MHz*/, 0x8e, 0xa0, },
110 { 16 * 447.25 /*MHz*/, 0x90, }, 109 { 16 * 447.25 /*MHz*/, 0x8e, 0x90, },
111 { 16 * 999.99 , 0x30, }, 110 { 16 * 999.99 , 0x8e, 0x30, },
112}; 111};
113 112
114static struct tuner_params tuner_philips_pal_params[] = { 113static struct tuner_params tuner_philips_pal_params[] = {
@@ -116,7 +115,6 @@ static struct tuner_params tuner_philips_pal_params[] = {
116 .type = TUNER_PARAM_TYPE_PAL, 115 .type = TUNER_PARAM_TYPE_PAL,
117 .ranges = tuner_philips_pal_ranges, 116 .ranges = tuner_philips_pal_ranges,
118 .count = ARRAY_SIZE(tuner_philips_pal_ranges), 117 .count = ARRAY_SIZE(tuner_philips_pal_ranges),
119 .config = 0x8e,
120 .cb_first_if_lower_freq = 1, 118 .cb_first_if_lower_freq = 1,
121 }, 119 },
122}; 120};
@@ -124,9 +122,9 @@ static struct tuner_params tuner_philips_pal_params[] = {
124/* ------------ TUNER_TEMIC_NTSC - TEMIC NTSC ------------ */ 122/* ------------ TUNER_TEMIC_NTSC - TEMIC NTSC ------------ */
125 123
126static struct tuner_range tuner_temic_ntsc_ranges[] = { 124static struct tuner_range tuner_temic_ntsc_ranges[] = {
127 { 16 * 157.25 /*MHz*/, 0x02, }, 125 { 16 * 157.25 /*MHz*/, 0x8e, 0x02, },
128 { 16 * 463.25 /*MHz*/, 0x04, }, 126 { 16 * 463.25 /*MHz*/, 0x8e, 0x04, },
129 { 16 * 999.99 , 0x01, }, 127 { 16 * 999.99 , 0x8e, 0x01, },
130}; 128};
131 129
132static struct tuner_params tuner_temic_ntsc_params[] = { 130static struct tuner_params tuner_temic_ntsc_params[] = {
@@ -134,16 +132,15 @@ static struct tuner_params tuner_temic_ntsc_params[] = {
134 .type = TUNER_PARAM_TYPE_NTSC, 132 .type = TUNER_PARAM_TYPE_NTSC,
135 .ranges = tuner_temic_ntsc_ranges, 133 .ranges = tuner_temic_ntsc_ranges,
136 .count = ARRAY_SIZE(tuner_temic_ntsc_ranges), 134 .count = ARRAY_SIZE(tuner_temic_ntsc_ranges),
137 .config = 0x8e,
138 }, 135 },
139}; 136};
140 137
141/* ------------ TUNER_TEMIC_PAL_I - TEMIC PAL_I ------------ */ 138/* ------------ TUNER_TEMIC_PAL_I - TEMIC PAL_I ------------ */
142 139
143static struct tuner_range tuner_temic_pal_i_ranges[] = { 140static struct tuner_range tuner_temic_pal_i_ranges[] = {
144 { 16 * 170.00 /*MHz*/, 0x02, }, 141 { 16 * 170.00 /*MHz*/, 0x8e, 0x02, },
145 { 16 * 450.00 /*MHz*/, 0x04, }, 142 { 16 * 450.00 /*MHz*/, 0x8e, 0x04, },
146 { 16 * 999.99 , 0x01, }, 143 { 16 * 999.99 , 0x8e, 0x01, },
147}; 144};
148 145
149static struct tuner_params tuner_temic_pal_i_params[] = { 146static struct tuner_params tuner_temic_pal_i_params[] = {
@@ -151,16 +148,15 @@ static struct tuner_params tuner_temic_pal_i_params[] = {
151 .type = TUNER_PARAM_TYPE_PAL, 148 .type = TUNER_PARAM_TYPE_PAL,
152 .ranges = tuner_temic_pal_i_ranges, 149 .ranges = tuner_temic_pal_i_ranges,
153 .count = ARRAY_SIZE(tuner_temic_pal_i_ranges), 150 .count = ARRAY_SIZE(tuner_temic_pal_i_ranges),
154 .config = 0x8e,
155 }, 151 },
156}; 152};
157 153
158/* ------------ TUNER_TEMIC_4036FY5_NTSC - TEMIC NTSC ------------ */ 154/* ------------ TUNER_TEMIC_4036FY5_NTSC - TEMIC NTSC ------------ */
159 155
160static struct tuner_range tuner_temic_4036fy5_ntsc_ranges[] = { 156static struct tuner_range tuner_temic_4036fy5_ntsc_ranges[] = {
161 { 16 * 157.25 /*MHz*/, 0xa0, }, 157 { 16 * 157.25 /*MHz*/, 0x8e, 0xa0, },
162 { 16 * 463.25 /*MHz*/, 0x90, }, 158 { 16 * 463.25 /*MHz*/, 0x8e, 0x90, },
163 { 16 * 999.99 , 0x30, }, 159 { 16 * 999.99 , 0x8e, 0x30, },
164}; 160};
165 161
166static struct tuner_params tuner_temic_4036fy5_ntsc_params[] = { 162static struct tuner_params tuner_temic_4036fy5_ntsc_params[] = {
@@ -168,16 +164,15 @@ static struct tuner_params tuner_temic_4036fy5_ntsc_params[] = {
168 .type = TUNER_PARAM_TYPE_NTSC, 164 .type = TUNER_PARAM_TYPE_NTSC,
169 .ranges = tuner_temic_4036fy5_ntsc_ranges, 165 .ranges = tuner_temic_4036fy5_ntsc_ranges,
170 .count = ARRAY_SIZE(tuner_temic_4036fy5_ntsc_ranges), 166 .count = ARRAY_SIZE(tuner_temic_4036fy5_ntsc_ranges),
171 .config = 0x8e,
172 }, 167 },
173}; 168};
174 169
175/* ------------ TUNER_ALPS_TSBH1_NTSC - TEMIC NTSC ------------ */ 170/* ------------ TUNER_ALPS_TSBH1_NTSC - TEMIC NTSC ------------ */
176 171
177static struct tuner_range tuner_alps_tsb_1_ranges[] = { 172static struct tuner_range tuner_alps_tsb_1_ranges[] = {
178 { 16 * 137.25 /*MHz*/, 0x01, }, 173 { 16 * 137.25 /*MHz*/, 0x8e, 0x01, },
179 { 16 * 385.25 /*MHz*/, 0x02, }, 174 { 16 * 385.25 /*MHz*/, 0x8e, 0x02, },
180 { 16 * 999.99 , 0x08, }, 175 { 16 * 999.99 , 0x8e, 0x08, },
181}; 176};
182 177
183static struct tuner_params tuner_alps_tsbh1_ntsc_params[] = { 178static struct tuner_params tuner_alps_tsbh1_ntsc_params[] = {
@@ -185,7 +180,6 @@ static struct tuner_params tuner_alps_tsbh1_ntsc_params[] = {
185 .type = TUNER_PARAM_TYPE_NTSC, 180 .type = TUNER_PARAM_TYPE_NTSC,
186 .ranges = tuner_alps_tsb_1_ranges, 181 .ranges = tuner_alps_tsb_1_ranges,
187 .count = ARRAY_SIZE(tuner_alps_tsb_1_ranges), 182 .count = ARRAY_SIZE(tuner_alps_tsb_1_ranges),
188 .config = 0x8e,
189 }, 183 },
190}; 184};
191 185
@@ -197,16 +191,15 @@ static struct tuner_params tuner_alps_tsb_1_params[] = {
197 .type = TUNER_PARAM_TYPE_PAL, 191 .type = TUNER_PARAM_TYPE_PAL,
198 .ranges = tuner_alps_tsb_1_ranges, 192 .ranges = tuner_alps_tsb_1_ranges,
199 .count = ARRAY_SIZE(tuner_alps_tsb_1_ranges), 193 .count = ARRAY_SIZE(tuner_alps_tsb_1_ranges),
200 .config = 0x8e,
201 }, 194 },
202}; 195};
203 196
204/* ------------ TUNER_ALPS_TSBB5_PAL_I - Alps PAL_I ------------ */ 197/* ------------ TUNER_ALPS_TSBB5_PAL_I - Alps PAL_I ------------ */
205 198
206static struct tuner_range tuner_alps_tsb_5_pal_ranges[] = { 199static struct tuner_range tuner_alps_tsb_5_pal_ranges[] = {
207 { 16 * 133.25 /*MHz*/, 0x01, }, 200 { 16 * 133.25 /*MHz*/, 0x8e, 0x01, },
208 { 16 * 351.25 /*MHz*/, 0x02, }, 201 { 16 * 351.25 /*MHz*/, 0x8e, 0x02, },
209 { 16 * 999.99 , 0x08, }, 202 { 16 * 999.99 , 0x8e, 0x08, },
210}; 203};
211 204
212static struct tuner_params tuner_alps_tsbb5_params[] = { 205static struct tuner_params tuner_alps_tsbb5_params[] = {
@@ -214,7 +207,6 @@ static struct tuner_params tuner_alps_tsbb5_params[] = {
214 .type = TUNER_PARAM_TYPE_PAL, 207 .type = TUNER_PARAM_TYPE_PAL,
215 .ranges = tuner_alps_tsb_5_pal_ranges, 208 .ranges = tuner_alps_tsb_5_pal_ranges,
216 .count = ARRAY_SIZE(tuner_alps_tsb_5_pal_ranges), 209 .count = ARRAY_SIZE(tuner_alps_tsb_5_pal_ranges),
217 .config = 0x8e,
218 }, 210 },
219}; 211};
220 212
@@ -225,7 +217,6 @@ static struct tuner_params tuner_alps_tsbe5_params[] = {
225 .type = TUNER_PARAM_TYPE_PAL, 217 .type = TUNER_PARAM_TYPE_PAL,
226 .ranges = tuner_alps_tsb_5_pal_ranges, 218 .ranges = tuner_alps_tsb_5_pal_ranges,
227 .count = ARRAY_SIZE(tuner_alps_tsb_5_pal_ranges), 219 .count = ARRAY_SIZE(tuner_alps_tsb_5_pal_ranges),
228 .config = 0x8e,
229 }, 220 },
230}; 221};
231 222
@@ -236,33 +227,31 @@ static struct tuner_params tuner_alps_tsbc5_params[] = {
236 .type = TUNER_PARAM_TYPE_PAL, 227 .type = TUNER_PARAM_TYPE_PAL,
237 .ranges = tuner_alps_tsb_5_pal_ranges, 228 .ranges = tuner_alps_tsb_5_pal_ranges,
238 .count = ARRAY_SIZE(tuner_alps_tsb_5_pal_ranges), 229 .count = ARRAY_SIZE(tuner_alps_tsb_5_pal_ranges),
239 .config = 0x8e,
240 }, 230 },
241}; 231};
242 232
243/* ------------ TUNER_TEMIC_4006FH5_PAL - TEMIC PAL ------------ */ 233/* ------------ TUNER_TEMIC_4006FH5_PAL - TEMIC PAL ------------ */
244 234
245static struct tuner_range tuner_temic_4006fh5_pal_ranges[] = { 235static struct tuner_range tuner_lg_pal_ranges[] = {
246 { 16 * 170.00 /*MHz*/, 0xa0, }, 236 { 16 * 170.00 /*MHz*/, 0x8e, 0xa0, },
247 { 16 * 450.00 /*MHz*/, 0x90, }, 237 { 16 * 450.00 /*MHz*/, 0x8e, 0x90, },
248 { 16 * 999.99 , 0x30, }, 238 { 16 * 999.99 , 0x8e, 0x30, },
249}; 239};
250 240
251static struct tuner_params tuner_temic_4006fh5_params[] = { 241static struct tuner_params tuner_temic_4006fh5_params[] = {
252 { 242 {
253 .type = TUNER_PARAM_TYPE_PAL, 243 .type = TUNER_PARAM_TYPE_PAL,
254 .ranges = tuner_temic_4006fh5_pal_ranges, 244 .ranges = tuner_lg_pal_ranges,
255 .count = ARRAY_SIZE(tuner_temic_4006fh5_pal_ranges), 245 .count = ARRAY_SIZE(tuner_lg_pal_ranges),
256 .config = 0x8e,
257 }, 246 },
258}; 247};
259 248
260/* ------------ TUNER_ALPS_TSHC6_NTSC - Alps NTSC ------------ */ 249/* ------------ TUNER_ALPS_TSHC6_NTSC - Alps NTSC ------------ */
261 250
262static struct tuner_range tuner_alps_tshc6_ntsc_ranges[] = { 251static struct tuner_range tuner_alps_tshc6_ntsc_ranges[] = {
263 { 16 * 137.25 /*MHz*/, 0x14, }, 252 { 16 * 137.25 /*MHz*/, 0x8e, 0x14, },
264 { 16 * 385.25 /*MHz*/, 0x12, }, 253 { 16 * 385.25 /*MHz*/, 0x8e, 0x12, },
265 { 16 * 999.99 , 0x11, }, 254 { 16 * 999.99 , 0x8e, 0x11, },
266}; 255};
267 256
268static struct tuner_params tuner_alps_tshc6_params[] = { 257static struct tuner_params tuner_alps_tshc6_params[] = {
@@ -270,16 +259,15 @@ static struct tuner_params tuner_alps_tshc6_params[] = {
270 .type = TUNER_PARAM_TYPE_NTSC, 259 .type = TUNER_PARAM_TYPE_NTSC,
271 .ranges = tuner_alps_tshc6_ntsc_ranges, 260 .ranges = tuner_alps_tshc6_ntsc_ranges,
272 .count = ARRAY_SIZE(tuner_alps_tshc6_ntsc_ranges), 261 .count = ARRAY_SIZE(tuner_alps_tshc6_ntsc_ranges),
273 .config = 0x8e,
274 }, 262 },
275}; 263};
276 264
277/* ------------ TUNER_TEMIC_PAL_DK - TEMIC PAL ------------ */ 265/* ------------ TUNER_TEMIC_PAL_DK - TEMIC PAL ------------ */
278 266
279static struct tuner_range tuner_temic_pal_dk_ranges[] = { 267static struct tuner_range tuner_temic_pal_dk_ranges[] = {
280 { 16 * 168.25 /*MHz*/, 0xa0, }, 268 { 16 * 168.25 /*MHz*/, 0x8e, 0xa0, },
281 { 16 * 456.25 /*MHz*/, 0x90, }, 269 { 16 * 456.25 /*MHz*/, 0x8e, 0x90, },
282 { 16 * 999.99 , 0x30, }, 270 { 16 * 999.99 , 0x8e, 0x30, },
283}; 271};
284 272
285static struct tuner_params tuner_temic_pal_dk_params[] = { 273static struct tuner_params tuner_temic_pal_dk_params[] = {
@@ -287,16 +275,15 @@ static struct tuner_params tuner_temic_pal_dk_params[] = {
287 .type = TUNER_PARAM_TYPE_PAL, 275 .type = TUNER_PARAM_TYPE_PAL,
288 .ranges = tuner_temic_pal_dk_ranges, 276 .ranges = tuner_temic_pal_dk_ranges,
289 .count = ARRAY_SIZE(tuner_temic_pal_dk_ranges), 277 .count = ARRAY_SIZE(tuner_temic_pal_dk_ranges),
290 .config = 0x8e,
291 }, 278 },
292}; 279};
293 280
294/* ------------ TUNER_PHILIPS_NTSC_M - Philips NTSC ------------ */ 281/* ------------ TUNER_PHILIPS_NTSC_M - Philips NTSC ------------ */
295 282
296static struct tuner_range tuner_philips_ntsc_m_ranges[] = { 283static struct tuner_range tuner_philips_ntsc_m_ranges[] = {
297 { 16 * 160.00 /*MHz*/, 0xa0, }, 284 { 16 * 160.00 /*MHz*/, 0x8e, 0xa0, },
298 { 16 * 454.00 /*MHz*/, 0x90, }, 285 { 16 * 454.00 /*MHz*/, 0x8e, 0x90, },
299 { 16 * 999.99 , 0x30, }, 286 { 16 * 999.99 , 0x8e, 0x30, },
300}; 287};
301 288
302static struct tuner_params tuner_philips_ntsc_m_params[] = { 289static struct tuner_params tuner_philips_ntsc_m_params[] = {
@@ -304,16 +291,15 @@ static struct tuner_params tuner_philips_ntsc_m_params[] = {
304 .type = TUNER_PARAM_TYPE_NTSC, 291 .type = TUNER_PARAM_TYPE_NTSC,
305 .ranges = tuner_philips_ntsc_m_ranges, 292 .ranges = tuner_philips_ntsc_m_ranges,
306 .count = ARRAY_SIZE(tuner_philips_ntsc_m_ranges), 293 .count = ARRAY_SIZE(tuner_philips_ntsc_m_ranges),
307 .config = 0x8e,
308 }, 294 },
309}; 295};
310 296
311/* ------------ TUNER_TEMIC_4066FY5_PAL_I - TEMIC PAL_I ------------ */ 297/* ------------ TUNER_TEMIC_4066FY5_PAL_I - TEMIC PAL_I ------------ */
312 298
313static struct tuner_range tuner_temic_40x6f_5_pal_ranges[] = { 299static struct tuner_range tuner_temic_40x6f_5_pal_ranges[] = {
314 { 16 * 169.00 /*MHz*/, 0xa0, }, 300 { 16 * 169.00 /*MHz*/, 0x8e, 0xa0, },
315 { 16 * 454.00 /*MHz*/, 0x90, }, 301 { 16 * 454.00 /*MHz*/, 0x8e, 0x90, },
316 { 16 * 999.99 , 0x30, }, 302 { 16 * 999.99 , 0x8e, 0x30, },
317}; 303};
318 304
319static struct tuner_params tuner_temic_4066fy5_pal_i_params[] = { 305static struct tuner_params tuner_temic_4066fy5_pal_i_params[] = {
@@ -321,7 +307,6 @@ static struct tuner_params tuner_temic_4066fy5_pal_i_params[] = {
321 .type = TUNER_PARAM_TYPE_PAL, 307 .type = TUNER_PARAM_TYPE_PAL,
322 .ranges = tuner_temic_40x6f_5_pal_ranges, 308 .ranges = tuner_temic_40x6f_5_pal_ranges,
323 .count = ARRAY_SIZE(tuner_temic_40x6f_5_pal_ranges), 309 .count = ARRAY_SIZE(tuner_temic_40x6f_5_pal_ranges),
324 .config = 0x8e,
325 }, 310 },
326}; 311};
327 312
@@ -332,7 +317,6 @@ static struct tuner_params tuner_temic_4006fn5_multi_params[] = {
332 .type = TUNER_PARAM_TYPE_PAL, 317 .type = TUNER_PARAM_TYPE_PAL,
333 .ranges = tuner_temic_40x6f_5_pal_ranges, 318 .ranges = tuner_temic_40x6f_5_pal_ranges,
334 .count = ARRAY_SIZE(tuner_temic_40x6f_5_pal_ranges), 319 .count = ARRAY_SIZE(tuner_temic_40x6f_5_pal_ranges),
335 .config = 0x8e,
336 }, 320 },
337}; 321};
338 322
@@ -340,9 +324,9 @@ static struct tuner_params tuner_temic_4006fn5_multi_params[] = {
340/* ------------ TUNER_TEMIC_4009FR5_PAL - TEMIC PAL ------------ */ 324/* ------------ TUNER_TEMIC_4009FR5_PAL - TEMIC PAL ------------ */
341 325
342static struct tuner_range tuner_temic_4009f_5_pal_ranges[] = { 326static struct tuner_range tuner_temic_4009f_5_pal_ranges[] = {
343 { 16 * 141.00 /*MHz*/, 0xa0, }, 327 { 16 * 141.00 /*MHz*/, 0x8e, 0xa0, },
344 { 16 * 464.00 /*MHz*/, 0x90, }, 328 { 16 * 464.00 /*MHz*/, 0x8e, 0x90, },
345 { 16 * 999.99 , 0x30, }, 329 { 16 * 999.99 , 0x8e, 0x30, },
346}; 330};
347 331
348static struct tuner_params tuner_temic_4009f_5_params[] = { 332static struct tuner_params tuner_temic_4009f_5_params[] = {
@@ -350,58 +334,42 @@ static struct tuner_params tuner_temic_4009f_5_params[] = {
350 .type = TUNER_PARAM_TYPE_PAL, 334 .type = TUNER_PARAM_TYPE_PAL,
351 .ranges = tuner_temic_4009f_5_pal_ranges, 335 .ranges = tuner_temic_4009f_5_pal_ranges,
352 .count = ARRAY_SIZE(tuner_temic_4009f_5_pal_ranges), 336 .count = ARRAY_SIZE(tuner_temic_4009f_5_pal_ranges),
353 .config = 0x8e,
354 }, 337 },
355}; 338};
356 339
357/* ------------ TUNER_TEMIC_4039FR5_NTSC - TEMIC NTSC ------------ */ 340/* ------------ TUNER_TEMIC_4039FR5_NTSC - TEMIC NTSC ------------ */
358 341
359static struct tuner_range tuner_temic_4039fr5_ntsc_ranges[] = { 342static struct tuner_range tuner_temic_4x3x_f_5_ntsc_ranges[] = {
360 { 16 * 158.00 /*MHz*/, 0xa0, }, 343 { 16 * 158.00 /*MHz*/, 0x8e, 0xa0, },
361 { 16 * 453.00 /*MHz*/, 0x90, }, 344 { 16 * 453.00 /*MHz*/, 0x8e, 0x90, },
362 { 16 * 999.99 , 0x30, }, 345 { 16 * 999.99 , 0x8e, 0x30, },
363}; 346};
364 347
365static struct tuner_params tuner_temic_4039fr5_params[] = { 348static struct tuner_params tuner_temic_4039fr5_params[] = {
366 { 349 {
367 .type = TUNER_PARAM_TYPE_NTSC, 350 .type = TUNER_PARAM_TYPE_NTSC,
368 .ranges = tuner_temic_4039fr5_ntsc_ranges, 351 .ranges = tuner_temic_4x3x_f_5_ntsc_ranges,
369 .count = ARRAY_SIZE(tuner_temic_4039fr5_ntsc_ranges), 352 .count = ARRAY_SIZE(tuner_temic_4x3x_f_5_ntsc_ranges),
370 .config = 0x8e,
371 }, 353 },
372}; 354};
373 355
374/* ------------ TUNER_TEMIC_4046FM5 - TEMIC PAL ------------ */ 356/* ------------ TUNER_TEMIC_4046FM5 - TEMIC PAL ------------ */
375 357
376static struct tuner_range tuner_temic_4046fm5_pal_ranges[] = {
377 { 16 * 169.00 /*MHz*/, 0xa0, },
378 { 16 * 454.00 /*MHz*/, 0x90, },
379 { 16 * 999.99 , 0x30, },
380};
381
382static struct tuner_params tuner_temic_4046fm5_params[] = { 358static struct tuner_params tuner_temic_4046fm5_params[] = {
383 { 359 {
384 .type = TUNER_PARAM_TYPE_PAL, 360 .type = TUNER_PARAM_TYPE_PAL,
385 .ranges = tuner_temic_4046fm5_pal_ranges, 361 .ranges = tuner_temic_40x6f_5_pal_ranges,
386 .count = ARRAY_SIZE(tuner_temic_4046fm5_pal_ranges), 362 .count = ARRAY_SIZE(tuner_temic_40x6f_5_pal_ranges),
387 .config = 0x8e,
388 }, 363 },
389}; 364};
390 365
391/* ------------ TUNER_PHILIPS_PAL_DK - Philips PAL ------------ */ 366/* ------------ TUNER_PHILIPS_PAL_DK - Philips PAL ------------ */
392 367
393static struct tuner_range tuner_lg_pal_ranges[] = {
394 { 16 * 170.00 /*MHz*/, 0xa0, },
395 { 16 * 450.00 /*MHz*/, 0x90, },
396 { 16 * 999.99 , 0x30, },
397};
398
399static struct tuner_params tuner_philips_pal_dk_params[] = { 368static struct tuner_params tuner_philips_pal_dk_params[] = {
400 { 369 {
401 .type = TUNER_PARAM_TYPE_PAL, 370 .type = TUNER_PARAM_TYPE_PAL,
402 .ranges = tuner_lg_pal_ranges, 371 .ranges = tuner_lg_pal_ranges,
403 .count = ARRAY_SIZE(tuner_lg_pal_ranges), 372 .count = ARRAY_SIZE(tuner_lg_pal_ranges),
404 .config = 0x8e,
405 }, 373 },
406}; 374};
407 375
@@ -412,7 +380,6 @@ static struct tuner_params tuner_philips_fq1216me_params[] = {
412 .type = TUNER_PARAM_TYPE_PAL, 380 .type = TUNER_PARAM_TYPE_PAL,
413 .ranges = tuner_lg_pal_ranges, 381 .ranges = tuner_lg_pal_ranges,
414 .count = ARRAY_SIZE(tuner_lg_pal_ranges), 382 .count = ARRAY_SIZE(tuner_lg_pal_ranges),
415 .config = 0x8e,
416 }, 383 },
417}; 384};
418 385
@@ -423,7 +390,6 @@ static struct tuner_params tuner_lg_pal_i_fm_params[] = {
423 .type = TUNER_PARAM_TYPE_PAL, 390 .type = TUNER_PARAM_TYPE_PAL,
424 .ranges = tuner_lg_pal_ranges, 391 .ranges = tuner_lg_pal_ranges,
425 .count = ARRAY_SIZE(tuner_lg_pal_ranges), 392 .count = ARRAY_SIZE(tuner_lg_pal_ranges),
426 .config = 0x8e,
427 }, 393 },
428}; 394};
429 395
@@ -434,16 +400,15 @@ static struct tuner_params tuner_lg_pal_i_params[] = {
434 .type = TUNER_PARAM_TYPE_PAL, 400 .type = TUNER_PARAM_TYPE_PAL,
435 .ranges = tuner_lg_pal_ranges, 401 .ranges = tuner_lg_pal_ranges,
436 .count = ARRAY_SIZE(tuner_lg_pal_ranges), 402 .count = ARRAY_SIZE(tuner_lg_pal_ranges),
437 .config = 0x8e,
438 }, 403 },
439}; 404};
440 405
441/* ------------ TUNER_LG_NTSC_FM - LGINNOTEK NTSC ------------ */ 406/* ------------ TUNER_LG_NTSC_FM - LGINNOTEK NTSC ------------ */
442 407
443static struct tuner_range tuner_lg_ntsc_fm_ranges[] = { 408static struct tuner_range tuner_lg_ntsc_fm_ranges[] = {
444 { 16 * 210.00 /*MHz*/, 0xa0, }, 409 { 16 * 210.00 /*MHz*/, 0x8e, 0xa0, },
445 { 16 * 497.00 /*MHz*/, 0x90, }, 410 { 16 * 497.00 /*MHz*/, 0x8e, 0x90, },
446 { 16 * 999.99 , 0x30, }, 411 { 16 * 999.99 , 0x8e, 0x30, },
447}; 412};
448 413
449static struct tuner_params tuner_lg_ntsc_fm_params[] = { 414static struct tuner_params tuner_lg_ntsc_fm_params[] = {
@@ -451,7 +416,6 @@ static struct tuner_params tuner_lg_ntsc_fm_params[] = {
451 .type = TUNER_PARAM_TYPE_NTSC, 416 .type = TUNER_PARAM_TYPE_NTSC,
452 .ranges = tuner_lg_ntsc_fm_ranges, 417 .ranges = tuner_lg_ntsc_fm_ranges,
453 .count = ARRAY_SIZE(tuner_lg_ntsc_fm_ranges), 418 .count = ARRAY_SIZE(tuner_lg_ntsc_fm_ranges),
454 .config = 0x8e,
455 }, 419 },
456}; 420};
457 421
@@ -462,7 +426,6 @@ static struct tuner_params tuner_lg_pal_fm_params[] = {
462 .type = TUNER_PARAM_TYPE_PAL, 426 .type = TUNER_PARAM_TYPE_PAL,
463 .ranges = tuner_lg_pal_ranges, 427 .ranges = tuner_lg_pal_ranges,
464 .count = ARRAY_SIZE(tuner_lg_pal_ranges), 428 .count = ARRAY_SIZE(tuner_lg_pal_ranges),
465 .config = 0x8e,
466 }, 429 },
467}; 430};
468 431
@@ -473,7 +436,6 @@ static struct tuner_params tuner_lg_pal_params[] = {
473 .type = TUNER_PARAM_TYPE_PAL, 436 .type = TUNER_PARAM_TYPE_PAL,
474 .ranges = tuner_lg_pal_ranges, 437 .ranges = tuner_lg_pal_ranges,
475 .count = ARRAY_SIZE(tuner_lg_pal_ranges), 438 .count = ARRAY_SIZE(tuner_lg_pal_ranges),
476 .config = 0x8e,
477 }, 439 },
478}; 440};
479 441
@@ -485,16 +447,15 @@ static struct tuner_params tuner_temic_4009_fn5_multi_pal_fm_params[] = {
485 .type = TUNER_PARAM_TYPE_PAL, 447 .type = TUNER_PARAM_TYPE_PAL,
486 .ranges = tuner_temic_4009f_5_pal_ranges, 448 .ranges = tuner_temic_4009f_5_pal_ranges,
487 .count = ARRAY_SIZE(tuner_temic_4009f_5_pal_ranges), 449 .count = ARRAY_SIZE(tuner_temic_4009f_5_pal_ranges),
488 .config = 0x8e,
489 }, 450 },
490}; 451};
491 452
492/* ------------ TUNER_SHARP_2U5JF5540_NTSC - SHARP NTSC ------------ */ 453/* ------------ TUNER_SHARP_2U5JF5540_NTSC - SHARP NTSC ------------ */
493 454
494static struct tuner_range tuner_sharp_2u5jf5540_ntsc_ranges[] = { 455static struct tuner_range tuner_sharp_2u5jf5540_ntsc_ranges[] = {
495 { 16 * 137.25 /*MHz*/, 0x01, }, 456 { 16 * 137.25 /*MHz*/, 0x8e, 0x01, },
496 { 16 * 317.25 /*MHz*/, 0x02, }, 457 { 16 * 317.25 /*MHz*/, 0x8e, 0x02, },
497 { 16 * 999.99 , 0x08, }, 458 { 16 * 999.99 , 0x8e, 0x08, },
498}; 459};
499 460
500static struct tuner_params tuner_sharp_2u5jf5540_params[] = { 461static struct tuner_params tuner_sharp_2u5jf5540_params[] = {
@@ -502,16 +463,15 @@ static struct tuner_params tuner_sharp_2u5jf5540_params[] = {
502 .type = TUNER_PARAM_TYPE_NTSC, 463 .type = TUNER_PARAM_TYPE_NTSC,
503 .ranges = tuner_sharp_2u5jf5540_ntsc_ranges, 464 .ranges = tuner_sharp_2u5jf5540_ntsc_ranges,
504 .count = ARRAY_SIZE(tuner_sharp_2u5jf5540_ntsc_ranges), 465 .count = ARRAY_SIZE(tuner_sharp_2u5jf5540_ntsc_ranges),
505 .config = 0x8e,
506 }, 466 },
507}; 467};
508 468
509/* ------------ TUNER_Samsung_PAL_TCPM9091PD27 - Samsung PAL ------------ */ 469/* ------------ TUNER_Samsung_PAL_TCPM9091PD27 - Samsung PAL ------------ */
510 470
511static struct tuner_range tuner_samsung_pal_tcpm9091pd27_ranges[] = { 471static struct tuner_range tuner_samsung_pal_tcpm9091pd27_ranges[] = {
512 { 16 * 169 /*MHz*/, 0xa0, }, 472 { 16 * 169 /*MHz*/, 0x8e, 0xa0, },
513 { 16 * 464 /*MHz*/, 0x90, }, 473 { 16 * 464 /*MHz*/, 0x8e, 0x90, },
514 { 16 * 999.99 , 0x30, }, 474 { 16 * 999.99 , 0x8e, 0x30, },
515}; 475};
516 476
517static struct tuner_params tuner_samsung_pal_tcpm9091pd27_params[] = { 477static struct tuner_params tuner_samsung_pal_tcpm9091pd27_params[] = {
@@ -519,7 +479,6 @@ static struct tuner_params tuner_samsung_pal_tcpm9091pd27_params[] = {
519 .type = TUNER_PARAM_TYPE_PAL, 479 .type = TUNER_PARAM_TYPE_PAL,
520 .ranges = tuner_samsung_pal_tcpm9091pd27_ranges, 480 .ranges = tuner_samsung_pal_tcpm9091pd27_ranges,
521 .count = ARRAY_SIZE(tuner_samsung_pal_tcpm9091pd27_ranges), 481 .count = ARRAY_SIZE(tuner_samsung_pal_tcpm9091pd27_ranges),
522 .config = 0x8e,
523 }, 482 },
524}; 483};
525 484
@@ -530,50 +489,35 @@ static struct tuner_params tuner_temic_4106fh5_params[] = {
530 .type = TUNER_PARAM_TYPE_PAL, 489 .type = TUNER_PARAM_TYPE_PAL,
531 .ranges = tuner_temic_4009f_5_pal_ranges, 490 .ranges = tuner_temic_4009f_5_pal_ranges,
532 .count = ARRAY_SIZE(tuner_temic_4009f_5_pal_ranges), 491 .count = ARRAY_SIZE(tuner_temic_4009f_5_pal_ranges),
533 .config = 0x8e,
534 }, 492 },
535}; 493};
536 494
537/* ------------ TUNER_TEMIC_4012FY5 - TEMIC PAL ------------ */ 495/* ------------ TUNER_TEMIC_4012FY5 - TEMIC PAL ------------ */
538 496
539static struct tuner_range tuner_temic_4012fy5_pal_ranges[] = {
540 { 16 * 140.25 /*MHz*/, 0x02, },
541 { 16 * 463.25 /*MHz*/, 0x04, },
542 { 16 * 999.99 , 0x01, },
543};
544
545static struct tuner_params tuner_temic_4012fy5_params[] = { 497static struct tuner_params tuner_temic_4012fy5_params[] = {
546 { 498 {
547 .type = TUNER_PARAM_TYPE_PAL, 499 .type = TUNER_PARAM_TYPE_PAL,
548 .ranges = tuner_temic_4012fy5_pal_ranges, 500 .ranges = tuner_temic_pal_ranges,
549 .count = ARRAY_SIZE(tuner_temic_4012fy5_pal_ranges), 501 .count = ARRAY_SIZE(tuner_temic_pal_ranges),
550 .config = 0x8e,
551 }, 502 },
552}; 503};
553 504
554/* ------------ TUNER_TEMIC_4136FY5 - TEMIC NTSC ------------ */ 505/* ------------ TUNER_TEMIC_4136FY5 - TEMIC NTSC ------------ */
555 506
556static struct tuner_range tuner_temic_4136_fy5_ntsc_ranges[] = {
557 { 16 * 158.00 /*MHz*/, 0xa0, },
558 { 16 * 453.00 /*MHz*/, 0x90, },
559 { 16 * 999.99 , 0x30, },
560};
561
562static struct tuner_params tuner_temic_4136_fy5_params[] = { 507static struct tuner_params tuner_temic_4136_fy5_params[] = {
563 { 508 {
564 .type = TUNER_PARAM_TYPE_NTSC, 509 .type = TUNER_PARAM_TYPE_NTSC,
565 .ranges = tuner_temic_4136_fy5_ntsc_ranges, 510 .ranges = tuner_temic_4x3x_f_5_ntsc_ranges,
566 .count = ARRAY_SIZE(tuner_temic_4136_fy5_ntsc_ranges), 511 .count = ARRAY_SIZE(tuner_temic_4x3x_f_5_ntsc_ranges),
567 .config = 0x8e,
568 }, 512 },
569}; 513};
570 514
571/* ------------ TUNER_LG_PAL_NEW_TAPC - LGINNOTEK PAL ------------ */ 515/* ------------ TUNER_LG_PAL_NEW_TAPC - LGINNOTEK PAL ------------ */
572 516
573static struct tuner_range tuner_lg_new_tapc_ranges[] = { 517static struct tuner_range tuner_lg_new_tapc_ranges[] = {
574 { 16 * 170.00 /*MHz*/, 0x01, }, 518 { 16 * 170.00 /*MHz*/, 0x8e, 0x01, },
575 { 16 * 450.00 /*MHz*/, 0x02, }, 519 { 16 * 450.00 /*MHz*/, 0x8e, 0x02, },
576 { 16 * 999.99 , 0x08, }, 520 { 16 * 999.99 , 0x8e, 0x08, },
577}; 521};
578 522
579static struct tuner_params tuner_lg_pal_new_tapc_params[] = { 523static struct tuner_params tuner_lg_pal_new_tapc_params[] = {
@@ -581,16 +525,15 @@ static struct tuner_params tuner_lg_pal_new_tapc_params[] = {
581 .type = TUNER_PARAM_TYPE_PAL, 525 .type = TUNER_PARAM_TYPE_PAL,
582 .ranges = tuner_lg_new_tapc_ranges, 526 .ranges = tuner_lg_new_tapc_ranges,
583 .count = ARRAY_SIZE(tuner_lg_new_tapc_ranges), 527 .count = ARRAY_SIZE(tuner_lg_new_tapc_ranges),
584 .config = 0x8e,
585 }, 528 },
586}; 529};
587 530
588/* ------------ TUNER_PHILIPS_FM1216ME_MK3 - Philips PAL ------------ */ 531/* ------------ TUNER_PHILIPS_FM1216ME_MK3 - Philips PAL ------------ */
589 532
590static struct tuner_range tuner_fm1216me_mk3_pal_ranges[] = { 533static struct tuner_range tuner_fm1216me_mk3_pal_ranges[] = {
591 { 16 * 158.00 /*MHz*/, 0x01, }, 534 { 16 * 158.00 /*MHz*/, 0x8e, 0x01, },
592 { 16 * 442.00 /*MHz*/, 0x02, }, 535 { 16 * 442.00 /*MHz*/, 0x8e, 0x02, },
593 { 16 * 999.99 , 0x04, }, 536 { 16 * 999.99 , 0x8e, 0x04, },
594}; 537};
595 538
596static struct tuner_params tuner_fm1216me_mk3_params[] = { 539static struct tuner_params tuner_fm1216me_mk3_params[] = {
@@ -598,7 +541,6 @@ static struct tuner_params tuner_fm1216me_mk3_params[] = {
598 .type = TUNER_PARAM_TYPE_PAL, 541 .type = TUNER_PARAM_TYPE_PAL,
599 .ranges = tuner_fm1216me_mk3_pal_ranges, 542 .ranges = tuner_fm1216me_mk3_pal_ranges,
600 .count = ARRAY_SIZE(tuner_fm1216me_mk3_pal_ranges), 543 .count = ARRAY_SIZE(tuner_fm1216me_mk3_pal_ranges),
601 .config = 0x8e,
602 .cb_first_if_lower_freq = 1, 544 .cb_first_if_lower_freq = 1,
603 }, 545 },
604}; 546};
@@ -610,7 +552,6 @@ static struct tuner_params tuner_lg_ntsc_new_tapc_params[] = {
610 .type = TUNER_PARAM_TYPE_NTSC, 552 .type = TUNER_PARAM_TYPE_NTSC,
611 .ranges = tuner_lg_new_tapc_ranges, 553 .ranges = tuner_lg_new_tapc_ranges,
612 .count = ARRAY_SIZE(tuner_lg_new_tapc_ranges), 554 .count = ARRAY_SIZE(tuner_lg_new_tapc_ranges),
613 .config = 0x8e,
614 }, 555 },
615}; 556};
616 557
@@ -622,16 +563,15 @@ static struct tuner_params tuner_hitachi_ntsc_params[] = {
622 .type = TUNER_PARAM_TYPE_NTSC, 563 .type = TUNER_PARAM_TYPE_NTSC,
623 .ranges = tuner_lg_new_tapc_ranges, 564 .ranges = tuner_lg_new_tapc_ranges,
624 .count = ARRAY_SIZE(tuner_lg_new_tapc_ranges), 565 .count = ARRAY_SIZE(tuner_lg_new_tapc_ranges),
625 .config = 0x8e,
626 }, 566 },
627}; 567};
628 568
629/* ------------ TUNER_PHILIPS_PAL_MK - Philips PAL ------------ */ 569/* ------------ TUNER_PHILIPS_PAL_MK - Philips PAL ------------ */
630 570
631static struct tuner_range tuner_philips_pal_mk_pal_ranges[] = { 571static struct tuner_range tuner_philips_pal_mk_pal_ranges[] = {
632 { 16 * 140.25 /*MHz*/, 0x01, }, 572 { 16 * 140.25 /*MHz*/, 0x8e, 0x01, },
633 { 16 * 463.25 /*MHz*/, 0xc2, }, 573 { 16 * 463.25 /*MHz*/, 0x8e, 0xc2, },
634 { 16 * 999.99 , 0xcf, }, 574 { 16 * 999.99 , 0x8e, 0xcf, },
635}; 575};
636 576
637static struct tuner_params tuner_philips_pal_mk_params[] = { 577static struct tuner_params tuner_philips_pal_mk_params[] = {
@@ -639,16 +579,15 @@ static struct tuner_params tuner_philips_pal_mk_params[] = {
639 .type = TUNER_PARAM_TYPE_PAL, 579 .type = TUNER_PARAM_TYPE_PAL,
640 .ranges = tuner_philips_pal_mk_pal_ranges, 580 .ranges = tuner_philips_pal_mk_pal_ranges,
641 .count = ARRAY_SIZE(tuner_philips_pal_mk_pal_ranges), 581 .count = ARRAY_SIZE(tuner_philips_pal_mk_pal_ranges),
642 .config = 0x8e,
643 }, 582 },
644}; 583};
645 584
646/* ------------ TUNER_PHILIPS_ATSC - Philips ATSC ------------ */ 585/* ------------ TUNER_PHILIPS_ATSC - Philips ATSC ------------ */
647 586
648static struct tuner_range tuner_philips_atsc_ranges[] = { 587static struct tuner_range tuner_philips_atsc_ranges[] = {
649 { 16 * 157.25 /*MHz*/, 0xa0, }, 588 { 16 * 157.25 /*MHz*/, 0x8e, 0xa0, },
650 { 16 * 454.00 /*MHz*/, 0x90, }, 589 { 16 * 454.00 /*MHz*/, 0x8e, 0x90, },
651 { 16 * 999.99 , 0x30, }, 590 { 16 * 999.99 , 0x8e, 0x30, },
652}; 591};
653 592
654static struct tuner_params tuner_philips_atsc_params[] = { 593static struct tuner_params tuner_philips_atsc_params[] = {
@@ -656,16 +595,15 @@ static struct tuner_params tuner_philips_atsc_params[] = {
656 .type = TUNER_PARAM_TYPE_NTSC, 595 .type = TUNER_PARAM_TYPE_NTSC,
657 .ranges = tuner_philips_atsc_ranges, 596 .ranges = tuner_philips_atsc_ranges,
658 .count = ARRAY_SIZE(tuner_philips_atsc_ranges), 597 .count = ARRAY_SIZE(tuner_philips_atsc_ranges),
659 .config = 0x8e,
660 }, 598 },
661}; 599};
662 600
663/* ------------ TUNER_PHILIPS_FM1236_MK3 - Philips NTSC ------------ */ 601/* ------------ TUNER_PHILIPS_FM1236_MK3 - Philips NTSC ------------ */
664 602
665static struct tuner_range tuner_fm1236_mk3_ntsc_ranges[] = { 603static struct tuner_range tuner_fm1236_mk3_ntsc_ranges[] = {
666 { 16 * 160.00 /*MHz*/, 0x01, }, 604 { 16 * 160.00 /*MHz*/, 0x8e, 0x01, },
667 { 16 * 442.00 /*MHz*/, 0x02, }, 605 { 16 * 442.00 /*MHz*/, 0x8e, 0x02, },
668 { 16 * 999.99 , 0x04, }, 606 { 16 * 999.99 , 0x8e, 0x04, },
669}; 607};
670 608
671static struct tuner_params tuner_fm1236_mk3_params[] = { 609static struct tuner_params tuner_fm1236_mk3_params[] = {
@@ -673,25 +611,17 @@ static struct tuner_params tuner_fm1236_mk3_params[] = {
673 .type = TUNER_PARAM_TYPE_NTSC, 611 .type = TUNER_PARAM_TYPE_NTSC,
674 .ranges = tuner_fm1236_mk3_ntsc_ranges, 612 .ranges = tuner_fm1236_mk3_ntsc_ranges,
675 .count = ARRAY_SIZE(tuner_fm1236_mk3_ntsc_ranges), 613 .count = ARRAY_SIZE(tuner_fm1236_mk3_ntsc_ranges),
676 .config = 0x8e,
677 .cb_first_if_lower_freq = 1, 614 .cb_first_if_lower_freq = 1,
678 }, 615 },
679}; 616};
680 617
681/* ------------ TUNER_PHILIPS_4IN1 - Philips NTSC ------------ */ 618/* ------------ TUNER_PHILIPS_4IN1 - Philips NTSC ------------ */
682 619
683static struct tuner_range tuner_philips_4in1_ntsc_ranges[] = {
684 { 16 * 160.00 /*MHz*/, 0x01, },
685 { 16 * 442.00 /*MHz*/, 0x02, },
686 { 16 * 999.99 , 0x04, },
687};
688
689static struct tuner_params tuner_philips_4in1_params[] = { 620static struct tuner_params tuner_philips_4in1_params[] = {
690 { 621 {
691 .type = TUNER_PARAM_TYPE_NTSC, 622 .type = TUNER_PARAM_TYPE_NTSC,
692 .ranges = tuner_philips_4in1_ntsc_ranges, 623 .ranges = tuner_fm1236_mk3_ntsc_ranges,
693 .count = ARRAY_SIZE(tuner_philips_4in1_ntsc_ranges), 624 .count = ARRAY_SIZE(tuner_fm1236_mk3_ntsc_ranges),
694 .config = 0x8e,
695 }, 625 },
696}; 626};
697 627
@@ -702,16 +632,15 @@ static struct tuner_params tuner_microtune_4049_fm5_params[] = {
702 .type = TUNER_PARAM_TYPE_PAL, 632 .type = TUNER_PARAM_TYPE_PAL,
703 .ranges = tuner_temic_4009f_5_pal_ranges, 633 .ranges = tuner_temic_4009f_5_pal_ranges,
704 .count = ARRAY_SIZE(tuner_temic_4009f_5_pal_ranges), 634 .count = ARRAY_SIZE(tuner_temic_4009f_5_pal_ranges),
705 .config = 0x8e,
706 }, 635 },
707}; 636};
708 637
709/* ------------ TUNER_PANASONIC_VP27 - Panasonic NTSC ------------ */ 638/* ------------ TUNER_PANASONIC_VP27 - Panasonic NTSC ------------ */
710 639
711static struct tuner_range tuner_panasonic_vp27_ntsc_ranges[] = { 640static struct tuner_range tuner_panasonic_vp27_ntsc_ranges[] = {
712 { 16 * 160.00 /*MHz*/, 0x01, }, 641 { 16 * 160.00 /*MHz*/, 0xce, 0x01, },
713 { 16 * 454.00 /*MHz*/, 0x02, }, 642 { 16 * 454.00 /*MHz*/, 0xce, 0x02, },
714 { 16 * 999.99 , 0x08, }, 643 { 16 * 999.99 , 0xce, 0x08, },
715}; 644};
716 645
717static struct tuner_params tuner_panasonic_vp27_params[] = { 646static struct tuner_params tuner_panasonic_vp27_params[] = {
@@ -719,33 +648,25 @@ static struct tuner_params tuner_panasonic_vp27_params[] = {
719 .type = TUNER_PARAM_TYPE_NTSC, 648 .type = TUNER_PARAM_TYPE_NTSC,
720 .ranges = tuner_panasonic_vp27_ntsc_ranges, 649 .ranges = tuner_panasonic_vp27_ntsc_ranges,
721 .count = ARRAY_SIZE(tuner_panasonic_vp27_ntsc_ranges), 650 .count = ARRAY_SIZE(tuner_panasonic_vp27_ntsc_ranges),
722 .config = 0xce,
723 }, 651 },
724}; 652};
725 653
726/* ------------ TUNER_LG_NTSC_TAPE - LGINNOTEK NTSC ------------ */ 654/* ------------ TUNER_LG_NTSC_TAPE - LGINNOTEK NTSC ------------ */
727 655
728static struct tuner_range tuner_lg_ntsc_tape_ranges[] = {
729 { 16 * 160.00 /*MHz*/, 0x01, },
730 { 16 * 442.00 /*MHz*/, 0x02, },
731 { 16 * 999.99 , 0x04, },
732};
733
734static struct tuner_params tuner_lg_ntsc_tape_params[] = { 656static struct tuner_params tuner_lg_ntsc_tape_params[] = {
735 { 657 {
736 .type = TUNER_PARAM_TYPE_NTSC, 658 .type = TUNER_PARAM_TYPE_NTSC,
737 .ranges = tuner_lg_ntsc_tape_ranges, 659 .ranges = tuner_fm1236_mk3_ntsc_ranges,
738 .count = ARRAY_SIZE(tuner_lg_ntsc_tape_ranges), 660 .count = ARRAY_SIZE(tuner_fm1236_mk3_ntsc_ranges),
739 .config = 0x8e,
740 }, 661 },
741}; 662};
742 663
743/* ------------ TUNER_TNF_8831BGFF - Philips PAL ------------ */ 664/* ------------ TUNER_TNF_8831BGFF - Philips PAL ------------ */
744 665
745static struct tuner_range tuner_tnf_8831bgff_pal_ranges[] = { 666static struct tuner_range tuner_tnf_8831bgff_pal_ranges[] = {
746 { 16 * 161.25 /*MHz*/, 0xa0, }, 667 { 16 * 161.25 /*MHz*/, 0x8e, 0xa0, },
747 { 16 * 463.25 /*MHz*/, 0x90, }, 668 { 16 * 463.25 /*MHz*/, 0x8e, 0x90, },
748 { 16 * 999.99 , 0x30, }, 669 { 16 * 999.99 , 0x8e, 0x30, },
749}; 670};
750 671
751static struct tuner_params tuner_tnf_8831bgff_params[] = { 672static struct tuner_params tuner_tnf_8831bgff_params[] = {
@@ -753,16 +674,15 @@ static struct tuner_params tuner_tnf_8831bgff_params[] = {
753 .type = TUNER_PARAM_TYPE_PAL, 674 .type = TUNER_PARAM_TYPE_PAL,
754 .ranges = tuner_tnf_8831bgff_pal_ranges, 675 .ranges = tuner_tnf_8831bgff_pal_ranges,
755 .count = ARRAY_SIZE(tuner_tnf_8831bgff_pal_ranges), 676 .count = ARRAY_SIZE(tuner_tnf_8831bgff_pal_ranges),
756 .config = 0x8e,
757 }, 677 },
758}; 678};
759 679
760/* ------------ TUNER_MICROTUNE_4042FI5 - Microtune NTSC ------------ */ 680/* ------------ TUNER_MICROTUNE_4042FI5 - Microtune NTSC ------------ */
761 681
762static struct tuner_range tuner_microtune_4042fi5_ntsc_ranges[] = { 682static struct tuner_range tuner_microtune_4042fi5_ntsc_ranges[] = {
763 { 16 * 162.00 /*MHz*/, 0xa2, }, 683 { 16 * 162.00 /*MHz*/, 0x8e, 0xa2, },
764 { 16 * 457.00 /*MHz*/, 0x94, }, 684 { 16 * 457.00 /*MHz*/, 0x8e, 0x94, },
765 { 16 * 999.99 , 0x31, }, 685 { 16 * 999.99 , 0x8e, 0x31, },
766}; 686};
767 687
768static struct tuner_params tuner_microtune_4042fi5_params[] = { 688static struct tuner_params tuner_microtune_4042fi5_params[] = {
@@ -770,7 +690,6 @@ static struct tuner_params tuner_microtune_4042fi5_params[] = {
770 .type = TUNER_PARAM_TYPE_NTSC, 690 .type = TUNER_PARAM_TYPE_NTSC,
771 .ranges = tuner_microtune_4042fi5_ntsc_ranges, 691 .ranges = tuner_microtune_4042fi5_ntsc_ranges,
772 .count = ARRAY_SIZE(tuner_microtune_4042fi5_ntsc_ranges), 692 .count = ARRAY_SIZE(tuner_microtune_4042fi5_ntsc_ranges),
773 .config = 0x8e,
774 }, 693 },
775}; 694};
776 695
@@ -778,9 +697,9 @@ static struct tuner_params tuner_microtune_4042fi5_params[] = {
778/* ------------ TUNER_TCL_2002N - TCL NTSC ------------ */ 697/* ------------ TUNER_TCL_2002N - TCL NTSC ------------ */
779 698
780static struct tuner_range tuner_tcl_2002n_ntsc_ranges[] = { 699static struct tuner_range tuner_tcl_2002n_ntsc_ranges[] = {
781 { 16 * 172.00 /*MHz*/, 0x01, }, 700 { 16 * 172.00 /*MHz*/, 0x8e, 0x01, },
782 { 16 * 448.00 /*MHz*/, 0x02, }, 701 { 16 * 448.00 /*MHz*/, 0x8e, 0x02, },
783 { 16 * 999.99 , 0x08, }, 702 { 16 * 999.99 , 0x8e, 0x08, },
784}; 703};
785 704
786static struct tuner_params tuner_tcl_2002n_params[] = { 705static struct tuner_params tuner_tcl_2002n_params[] = {
@@ -788,34 +707,26 @@ static struct tuner_params tuner_tcl_2002n_params[] = {
788 .type = TUNER_PARAM_TYPE_NTSC, 707 .type = TUNER_PARAM_TYPE_NTSC,
789 .ranges = tuner_tcl_2002n_ntsc_ranges, 708 .ranges = tuner_tcl_2002n_ntsc_ranges,
790 .count = ARRAY_SIZE(tuner_tcl_2002n_ntsc_ranges), 709 .count = ARRAY_SIZE(tuner_tcl_2002n_ntsc_ranges),
791 .config = 0x8e,
792 .cb_first_if_lower_freq = 1, 710 .cb_first_if_lower_freq = 1,
793 }, 711 },
794}; 712};
795 713
796/* ------------ TUNER_PHILIPS_FM1256_IH3 - Philips PAL ------------ */ 714/* ------------ TUNER_PHILIPS_FM1256_IH3 - Philips PAL ------------ */
797 715
798static struct tuner_range tuner_philips_fm1256_ih3_pal_ranges[] = {
799 { 16 * 160.00 /*MHz*/, 0x01, },
800 { 16 * 442.00 /*MHz*/, 0x02, },
801 { 16 * 999.99 , 0x04, },
802};
803
804static struct tuner_params tuner_philips_fm1256_ih3_params[] = { 716static struct tuner_params tuner_philips_fm1256_ih3_params[] = {
805 { 717 {
806 .type = TUNER_PARAM_TYPE_PAL, 718 .type = TUNER_PARAM_TYPE_PAL,
807 .ranges = tuner_philips_fm1256_ih3_pal_ranges, 719 .ranges = tuner_fm1236_mk3_ntsc_ranges,
808 .count = ARRAY_SIZE(tuner_philips_fm1256_ih3_pal_ranges), 720 .count = ARRAY_SIZE(tuner_fm1236_mk3_ntsc_ranges),
809 .config = 0x8e,
810 }, 721 },
811}; 722};
812 723
813/* ------------ TUNER_THOMSON_DTT7610 - THOMSON ATSC ------------ */ 724/* ------------ TUNER_THOMSON_DTT7610 - THOMSON ATSC ------------ */
814 725
815static struct tuner_range tuner_thomson_dtt7610_ntsc_ranges[] = { 726static struct tuner_range tuner_thomson_dtt7610_ntsc_ranges[] = {
816 { 16 * 157.25 /*MHz*/, 0x39, }, 727 { 16 * 157.25 /*MHz*/, 0x8e, 0x39, },
817 { 16 * 454.00 /*MHz*/, 0x3a, }, 728 { 16 * 454.00 /*MHz*/, 0x8e, 0x3a, },
818 { 16 * 999.99 , 0x3c, }, 729 { 16 * 999.99 , 0x8e, 0x3c, },
819}; 730};
820 731
821static struct tuner_params tuner_thomson_dtt7610_params[] = { 732static struct tuner_params tuner_thomson_dtt7610_params[] = {
@@ -823,16 +734,15 @@ static struct tuner_params tuner_thomson_dtt7610_params[] = {
823 .type = TUNER_PARAM_TYPE_NTSC, 734 .type = TUNER_PARAM_TYPE_NTSC,
824 .ranges = tuner_thomson_dtt7610_ntsc_ranges, 735 .ranges = tuner_thomson_dtt7610_ntsc_ranges,
825 .count = ARRAY_SIZE(tuner_thomson_dtt7610_ntsc_ranges), 736 .count = ARRAY_SIZE(tuner_thomson_dtt7610_ntsc_ranges),
826 .config = 0x8e,
827 }, 737 },
828}; 738};
829 739
830/* ------------ TUNER_PHILIPS_FQ1286 - Philips NTSC ------------ */ 740/* ------------ TUNER_PHILIPS_FQ1286 - Philips NTSC ------------ */
831 741
832static struct tuner_range tuner_philips_fq1286_ntsc_ranges[] = { 742static struct tuner_range tuner_philips_fq1286_ntsc_ranges[] = {
833 { 16 * 160.00 /*MHz*/, 0x41, }, 743 { 16 * 160.00 /*MHz*/, 0x8e, 0x41, },
834 { 16 * 454.00 /*MHz*/, 0x42, }, 744 { 16 * 454.00 /*MHz*/, 0x8e, 0x42, },
835 { 16 * 999.99 , 0x04, }, 745 { 16 * 999.99 , 0x8e, 0x04, },
836}; 746};
837 747
838static struct tuner_params tuner_philips_fq1286_params[] = { 748static struct tuner_params tuner_philips_fq1286_params[] = {
@@ -840,16 +750,15 @@ static struct tuner_params tuner_philips_fq1286_params[] = {
840 .type = TUNER_PARAM_TYPE_NTSC, 750 .type = TUNER_PARAM_TYPE_NTSC,
841 .ranges = tuner_philips_fq1286_ntsc_ranges, 751 .ranges = tuner_philips_fq1286_ntsc_ranges,
842 .count = ARRAY_SIZE(tuner_philips_fq1286_ntsc_ranges), 752 .count = ARRAY_SIZE(tuner_philips_fq1286_ntsc_ranges),
843 .config = 0x8e,
844 }, 753 },
845}; 754};
846 755
847/* ------------ TUNER_TCL_2002MB - TCL PAL ------------ */ 756/* ------------ TUNER_TCL_2002MB - TCL PAL ------------ */
848 757
849static struct tuner_range tuner_tcl_2002mb_pal_ranges[] = { 758static struct tuner_range tuner_tcl_2002mb_pal_ranges[] = {
850 { 16 * 170.00 /*MHz*/, 0x01, }, 759 { 16 * 170.00 /*MHz*/, 0xce, 0x01, },
851 { 16 * 450.00 /*MHz*/, 0x02, }, 760 { 16 * 450.00 /*MHz*/, 0xce, 0x02, },
852 { 16 * 999.99 , 0x08, }, 761 { 16 * 999.99 , 0xce, 0x08, },
853}; 762};
854 763
855static struct tuner_params tuner_tcl_2002mb_params[] = { 764static struct tuner_params tuner_tcl_2002mb_params[] = {
@@ -857,24 +766,22 @@ static struct tuner_params tuner_tcl_2002mb_params[] = {
857 .type = TUNER_PARAM_TYPE_PAL, 766 .type = TUNER_PARAM_TYPE_PAL,
858 .ranges = tuner_tcl_2002mb_pal_ranges, 767 .ranges = tuner_tcl_2002mb_pal_ranges,
859 .count = ARRAY_SIZE(tuner_tcl_2002mb_pal_ranges), 768 .count = ARRAY_SIZE(tuner_tcl_2002mb_pal_ranges),
860 .config = 0xce,
861 }, 769 },
862}; 770};
863 771
864/* ------------ TUNER_PHILIPS_FQ1216AME_MK4 - Philips PAL ------------ */ 772/* ------------ TUNER_PHILIPS_FQ1216AME_MK4 - Philips PAL ------------ */
865 773
866static struct tuner_range tuner_philips_fq12_6a___mk4_ranges[] = { 774static struct tuner_range tuner_philips_fq12_6a___mk4_pal_ranges[] = {
867 { 16 * 160.00 /*MHz*/, 0x01, }, 775 { 16 * 160.00 /*MHz*/, 0xce, 0x01, },
868 { 16 * 442.00 /*MHz*/, 0x02, }, 776 { 16 * 442.00 /*MHz*/, 0xce, 0x02, },
869 { 16 * 999.99 , 0x04, }, 777 { 16 * 999.99 , 0xce, 0x04, },
870}; 778};
871 779
872static struct tuner_params tuner_philips_fq1216ame_mk4_params[] = { 780static struct tuner_params tuner_philips_fq1216ame_mk4_params[] = {
873 { 781 {
874 .type = TUNER_PARAM_TYPE_PAL, 782 .type = TUNER_PARAM_TYPE_PAL,
875 .ranges = tuner_philips_fq12_6a___mk4_ranges, 783 .ranges = tuner_philips_fq12_6a___mk4_pal_ranges,
876 .count = ARRAY_SIZE(tuner_philips_fq12_6a___mk4_ranges), 784 .count = ARRAY_SIZE(tuner_philips_fq12_6a___mk4_pal_ranges),
877 .config = 0xce,
878 }, 785 },
879}; 786};
880 787
@@ -883,35 +790,27 @@ static struct tuner_params tuner_philips_fq1216ame_mk4_params[] = {
883static struct tuner_params tuner_philips_fq1236a_mk4_params[] = { 790static struct tuner_params tuner_philips_fq1236a_mk4_params[] = {
884 { 791 {
885 .type = TUNER_PARAM_TYPE_NTSC, 792 .type = TUNER_PARAM_TYPE_NTSC,
886 .ranges = tuner_philips_fq12_6a___mk4_ranges, 793 .ranges = tuner_fm1236_mk3_ntsc_ranges,
887 .count = ARRAY_SIZE(tuner_philips_fq12_6a___mk4_ranges), 794 .count = ARRAY_SIZE(tuner_fm1236_mk3_ntsc_ranges),
888 .config = 0x8e,
889 }, 795 },
890}; 796};
891 797
892/* ------------ TUNER_YMEC_TVF_8531MF - Philips NTSC ------------ */ 798/* ------------ TUNER_YMEC_TVF_8531MF - Philips NTSC ------------ */
893 799
894static struct tuner_range tuner_ymec_tvf_8531mf_ntsc_ranges[] = {
895 { 16 * 160.00 /*MHz*/, 0xa0, },
896 { 16 * 454.00 /*MHz*/, 0x90, },
897 { 16 * 999.99 , 0x30, },
898};
899
900static struct tuner_params tuner_ymec_tvf_8531mf_params[] = { 800static struct tuner_params tuner_ymec_tvf_8531mf_params[] = {
901 { 801 {
902 .type = TUNER_PARAM_TYPE_NTSC, 802 .type = TUNER_PARAM_TYPE_NTSC,
903 .ranges = tuner_ymec_tvf_8531mf_ntsc_ranges, 803 .ranges = tuner_philips_ntsc_m_ranges,
904 .count = ARRAY_SIZE(tuner_ymec_tvf_8531mf_ntsc_ranges), 804 .count = ARRAY_SIZE(tuner_philips_ntsc_m_ranges),
905 .config = 0x8e,
906 }, 805 },
907}; 806};
908 807
909/* ------------ TUNER_YMEC_TVF_5533MF - Philips NTSC ------------ */ 808/* ------------ TUNER_YMEC_TVF_5533MF - Philips NTSC ------------ */
910 809
911static struct tuner_range tuner_ymec_tvf_5533mf_ntsc_ranges[] = { 810static struct tuner_range tuner_ymec_tvf_5533mf_ntsc_ranges[] = {
912 { 16 * 160.00 /*MHz*/, 0x01, }, 811 { 16 * 160.00 /*MHz*/, 0x8e, 0x01, },
913 { 16 * 454.00 /*MHz*/, 0x02, }, 812 { 16 * 454.00 /*MHz*/, 0x8e, 0x02, },
914 { 16 * 999.99 , 0x04, }, 813 { 16 * 999.99 , 0x8e, 0x04, },
915}; 814};
916 815
917static struct tuner_params tuner_ymec_tvf_5533mf_params[] = { 816static struct tuner_params tuner_ymec_tvf_5533mf_params[] = {
@@ -919,7 +818,6 @@ static struct tuner_params tuner_ymec_tvf_5533mf_params[] = {
919 .type = TUNER_PARAM_TYPE_NTSC, 818 .type = TUNER_PARAM_TYPE_NTSC,
920 .ranges = tuner_ymec_tvf_5533mf_ntsc_ranges, 819 .ranges = tuner_ymec_tvf_5533mf_ntsc_ranges,
921 .count = ARRAY_SIZE(tuner_ymec_tvf_5533mf_ntsc_ranges), 820 .count = ARRAY_SIZE(tuner_ymec_tvf_5533mf_ntsc_ranges),
922 .config = 0x8e,
923 }, 821 },
924}; 822};
925 823
@@ -928,9 +826,9 @@ static struct tuner_params tuner_ymec_tvf_5533mf_params[] = {
928/* DTT 7611 7611A 7612 7613 7613A 7614 7615 7615A */ 826/* DTT 7611 7611A 7612 7613 7613A 7614 7615 7615A */
929 827
930static struct tuner_range tuner_thomson_dtt761x_ntsc_ranges[] = { 828static struct tuner_range tuner_thomson_dtt761x_ntsc_ranges[] = {
931 { 16 * 145.25 /*MHz*/, 0x39, }, 829 { 16 * 145.25 /*MHz*/, 0x8e, 0x39, },
932 { 16 * 415.25 /*MHz*/, 0x3a, }, 830 { 16 * 415.25 /*MHz*/, 0x8e, 0x3a, },
933 { 16 * 999.99 , 0x3c, }, 831 { 16 * 999.99 , 0x8e, 0x3c, },
934}; 832};
935 833
936 834
@@ -939,42 +837,39 @@ static struct tuner_params tuner_thomson_dtt761x_params[] = {
939 .type = TUNER_PARAM_TYPE_NTSC, 837 .type = TUNER_PARAM_TYPE_NTSC,
940 .ranges = tuner_thomson_dtt761x_ntsc_ranges, 838 .ranges = tuner_thomson_dtt761x_ntsc_ranges,
941 .count = ARRAY_SIZE(tuner_thomson_dtt761x_ntsc_ranges), 839 .count = ARRAY_SIZE(tuner_thomson_dtt761x_ntsc_ranges),
942 .config = 0x8e,
943 }, 840 },
944}; 841};
945 842
946/* ------------ TUNER_TENA_9533_DI - Philips PAL ------------ */ 843/* ------------ TUNER_TENA_9533_DI - Philips PAL ------------ */
947 844
948static struct tuner_range tuner_tuner_tena_9533_di_pal_ranges[] = { 845static struct tuner_range tuner_tena_9533_di_pal_ranges[] = {
949 { 16 * 160.25 /*MHz*/, 0x01, }, 846 { 16 * 160.25 /*MHz*/, 0x8e, 0x01, },
950 { 16 * 464.25 /*MHz*/, 0x02, }, 847 { 16 * 464.25 /*MHz*/, 0x8e, 0x02, },
951 { 16 * 999.99 , 0x04, }, 848 { 16 * 999.99 , 0x8e, 0x04, },
952}; 849};
953 850
954static struct tuner_params tuner_tena_9533_di_params[] = { 851static struct tuner_params tuner_tena_9533_di_params[] = {
955 { 852 {
956 .type = TUNER_PARAM_TYPE_PAL, 853 .type = TUNER_PARAM_TYPE_PAL,
957 .ranges = tuner_tuner_tena_9533_di_pal_ranges, 854 .ranges = tuner_tena_9533_di_pal_ranges,
958 .count = ARRAY_SIZE(tuner_tuner_tena_9533_di_pal_ranges), 855 .count = ARRAY_SIZE(tuner_tena_9533_di_pal_ranges),
959 .config = 0x8e,
960 }, 856 },
961}; 857};
962 858
963/* ------------ TUNER_PHILIPS_FMD1216ME_MK3 - Philips PAL ------------ */ 859/* ------------ TUNER_PHILIPS_FMD1216ME_MK3 - Philips PAL ------------ */
964 860
965static struct tuner_range tuner_philips_fmd1216me_mk3_pal_ranges[] = { 861static struct tuner_range tuner_philips_fmd1216me_mk3_pal_ranges[] = {
966 { 16 * 160.00 /*MHz*/, 0x51, }, 862 { 16 * 160.00 /*MHz*/, 0x86, 0x51, },
967 { 16 * 442.00 /*MHz*/, 0x52, }, 863 { 16 * 442.00 /*MHz*/, 0x86, 0x52, },
968 { 16 * 999.99 , 0x54, }, 864 { 16 * 999.99 , 0x86, 0x54, },
969}; 865};
970 866
971 867
972static struct tuner_params tuner_tuner_philips_fmd1216me_mk3_params[] = { 868static struct tuner_params tuner_philips_fmd1216me_mk3_params[] = {
973 { 869 {
974 .type = TUNER_PARAM_TYPE_PAL, 870 .type = TUNER_PARAM_TYPE_PAL,
975 .ranges = tuner_philips_fmd1216me_mk3_pal_ranges, 871 .ranges = tuner_philips_fmd1216me_mk3_pal_ranges,
976 .count = ARRAY_SIZE(tuner_philips_fmd1216me_mk3_pal_ranges), 872 .count = ARRAY_SIZE(tuner_philips_fmd1216me_mk3_pal_ranges),
977 .config = 0x86,
978 }, 873 },
979}; 874};
980 875
@@ -982,9 +877,9 @@ static struct tuner_params tuner_tuner_philips_fmd1216me_mk3_params[] = {
982/* ------------ TUNER_LG_TDVS_H062F - INFINEON ATSC ------------ */ 877/* ------------ TUNER_LG_TDVS_H062F - INFINEON ATSC ------------ */
983 878
984static struct tuner_range tuner_tua6034_ntsc_ranges[] = { 879static struct tuner_range tuner_tua6034_ntsc_ranges[] = {
985 { 16 * 160.00 /*MHz*/, 0x01 }, 880 { 16 * 160.00 /*MHz*/, 0x8e, 0x01 },
986 { 16 * 455.00 /*MHz*/, 0x02 }, 881 { 16 * 455.00 /*MHz*/, 0x8e, 0x02 },
987 { 16 * 999.99 , 0x04 }, 882 { 16 * 999.99 , 0x8e, 0x04 },
988}; 883};
989 884
990 885
@@ -993,50 +888,51 @@ static struct tuner_params tuner_tua6034_params[] = {
993 .type = TUNER_PARAM_TYPE_NTSC, 888 .type = TUNER_PARAM_TYPE_NTSC,
994 .ranges = tuner_tua6034_ntsc_ranges, 889 .ranges = tuner_tua6034_ntsc_ranges,
995 .count = ARRAY_SIZE(tuner_tua6034_ntsc_ranges), 890 .count = ARRAY_SIZE(tuner_tua6034_ntsc_ranges),
996 .config = 0x8e,
997 }, 891 },
998}; 892};
999 893
1000/* ------------ TUNER_YMEC_TVF66T5_B_DFF - Philips PAL ------------ */ 894/* ------------ TUNER_YMEC_TVF66T5_B_DFF - Philips PAL ------------ */
1001 895
1002static struct tuner_range tuner_ymec_tvf66t5_b_dff_pal_ranges[] = {
1003 { 16 * 160.25 /*MHz*/, 0x01, },
1004 { 16 * 464.25 /*MHz*/, 0x02, },
1005 { 16 * 999.99 , 0x08, },
1006};
1007
1008static struct tuner_params tuner_ymec_tvf66t5_b_dff_params[] = { 896static struct tuner_params tuner_ymec_tvf66t5_b_dff_params[] = {
1009 { 897 {
1010 .type = TUNER_PARAM_TYPE_PAL, 898 .type = TUNER_PARAM_TYPE_PAL,
1011 .ranges = tuner_ymec_tvf66t5_b_dff_pal_ranges, 899 .ranges = tuner_tena_9533_di_pal_ranges,
1012 .count = ARRAY_SIZE(tuner_ymec_tvf66t5_b_dff_pal_ranges), 900 .count = ARRAY_SIZE(tuner_tena_9533_di_pal_ranges),
1013 .config = 0x8e,
1014 }, 901 },
1015}; 902};
1016 903
1017/* ------------ TUNER_LG_NTSC_TALN_MINI - LGINNOTEK NTSC ------------ */ 904/* ------------ TUNER_LG_NTSC_TALN_MINI - LGINNOTEK NTSC ------------ */
1018 905
1019static struct tuner_range tuner_lg_taln_mini_ntsc_ranges[] = { 906static struct tuner_range tuner_lg_taln_ntsc_ranges[] = {
1020 { 16 * 137.25 /*MHz*/, 0x01, }, 907 { 16 * 137.25 /*MHz*/, 0x8e, 0x01, },
1021 { 16 * 373.25 /*MHz*/, 0x02, }, 908 { 16 * 373.25 /*MHz*/, 0x8e, 0x02, },
1022 { 16 * 999.99 , 0x08, }, 909 { 16 * 999.99 , 0x8e, 0x08, },
910};
911
912static struct tuner_range tuner_lg_taln_pal_secam_ranges[] = {
913 { 16 * 150.00 /*MHz*/, 0x8e, 0x01, },
914 { 16 * 425.00 /*MHz*/, 0x8e, 0x02, },
915 { 16 * 999.99 , 0x8e, 0x08, },
1023}; 916};
1024 917
1025static struct tuner_params tuner_lg_taln_mini_params[] = { 918static struct tuner_params tuner_lg_taln_params[] = {
1026 { 919 {
1027 .type = TUNER_PARAM_TYPE_NTSC, 920 .type = TUNER_PARAM_TYPE_NTSC,
1028 .ranges = tuner_lg_taln_mini_ntsc_ranges, 921 .ranges = tuner_lg_taln_ntsc_ranges,
1029 .count = ARRAY_SIZE(tuner_lg_taln_mini_ntsc_ranges), 922 .count = ARRAY_SIZE(tuner_lg_taln_ntsc_ranges),
1030 .config = 0x8e, 923 },{
924 .type = TUNER_PARAM_TYPE_PAL,
925 .ranges = tuner_lg_taln_pal_secam_ranges,
926 .count = ARRAY_SIZE(tuner_lg_taln_pal_secam_ranges),
1031 }, 927 },
1032}; 928};
1033 929
1034/* ------------ TUNER_PHILIPS_TD1316 - Philips PAL ------------ */ 930/* ------------ TUNER_PHILIPS_TD1316 - Philips PAL ------------ */
1035 931
1036static struct tuner_range tuner_philips_td1316_pal_ranges[] = { 932static struct tuner_range tuner_philips_td1316_pal_ranges[] = {
1037 { 16 * 160.00 /*MHz*/, 0xa1, }, 933 { 16 * 160.00 /*MHz*/, 0xc8, 0xa1, },
1038 { 16 * 442.00 /*MHz*/, 0xa2, }, 934 { 16 * 442.00 /*MHz*/, 0xc8, 0xa2, },
1039 { 16 * 999.99 , 0xa4, }, 935 { 16 * 999.99 , 0xc8, 0xa4, },
1040}; 936};
1041 937
1042static struct tuner_params tuner_philips_td1316_params[] = { 938static struct tuner_params tuner_philips_td1316_params[] = {
@@ -1044,34 +940,42 @@ static struct tuner_params tuner_philips_td1316_params[] = {
1044 .type = TUNER_PARAM_TYPE_PAL, 940 .type = TUNER_PARAM_TYPE_PAL,
1045 .ranges = tuner_philips_td1316_pal_ranges, 941 .ranges = tuner_philips_td1316_pal_ranges,
1046 .count = ARRAY_SIZE(tuner_philips_td1316_pal_ranges), 942 .count = ARRAY_SIZE(tuner_philips_td1316_pal_ranges),
1047 .config = 0xc8,
1048 }, 943 },
1049}; 944};
1050 945
1051/* ------------ TUNER_PHILIPS_TUV1236D - Philips ATSC ------------ */ 946/* ------------ TUNER_PHILIPS_TUV1236D - Philips ATSC ------------ */
1052 947
1053static struct tuner_range tuner_tuv1236d_ntsc_ranges[] = { 948static struct tuner_range tuner_tuv1236d_ntsc_ranges[] = {
1054 { 16 * 157.25 /*MHz*/, 0x01, }, 949 { 16 * 157.25 /*MHz*/, 0xce, 0x01, },
1055 { 16 * 454.00 /*MHz*/, 0x02, }, 950 { 16 * 454.00 /*MHz*/, 0xce, 0x02, },
1056 { 16 * 999.99 , 0x04, }, 951 { 16 * 999.99 , 0xce, 0x04, },
1057}; 952};
1058 953
1059 954
1060static struct tuner_params tuner_tuner_tuv1236d_params[] = { 955static struct tuner_params tuner_tuv1236d_params[] = {
1061 { 956 {
1062 .type = TUNER_PARAM_TYPE_NTSC, 957 .type = TUNER_PARAM_TYPE_NTSC,
1063 .ranges = tuner_tuv1236d_ntsc_ranges, 958 .ranges = tuner_tuv1236d_ntsc_ranges,
1064 .count = ARRAY_SIZE(tuner_tuv1236d_ntsc_ranges), 959 .count = ARRAY_SIZE(tuner_tuv1236d_ntsc_ranges),
1065 .config = 0xce,
1066 }, 960 },
1067}; 961};
1068 962
1069/* ------------ TUNER_TNF_5335MF - Philips NTSC ------------ */ 963/* ------------ TUNER_TNF_xxx5 - Texas Instruments--------- */
964/* This is known to work with Tenna TVF58t5-MFF and TVF5835 MFF
965 * but it is expected to work also with other Tenna/Ymec
966 * models based on TI SN 761677 chip on both PAL and NTSC
967 */
968
969static struct tuner_range tuner_tnf_5335_d_if_pal_ranges[] = {
970 { 16 * 168.25 /*MHz*/, 0x8e, 0x01, },
971 { 16 * 471.25 /*MHz*/, 0x8e, 0x02, },
972 { 16 * 999.99 , 0x8e, 0x08, },
973};
1070 974
1071static struct tuner_range tuner_tnf_5335mf_ntsc_ranges[] = { 975static struct tuner_range tuner_tnf_5335mf_ntsc_ranges[] = {
1072 { 16 * 157.25 /*MHz*/, 0x01, }, 976 { 16 * 169.25 /*MHz*/, 0x8e, 0x01, },
1073 { 16 * 454.00 /*MHz*/, 0x02, }, 977 { 16 * 469.25 /*MHz*/, 0x8e, 0x02, },
1074 { 16 * 999.99 , 0x04, }, 978 { 16 * 999.99 , 0x8e, 0x08, },
1075}; 979};
1076 980
1077static struct tuner_params tuner_tnf_5335mf_params[] = { 981static struct tuner_params tuner_tnf_5335mf_params[] = {
@@ -1079,7 +983,11 @@ static struct tuner_params tuner_tnf_5335mf_params[] = {
1079 .type = TUNER_PARAM_TYPE_NTSC, 983 .type = TUNER_PARAM_TYPE_NTSC,
1080 .ranges = tuner_tnf_5335mf_ntsc_ranges, 984 .ranges = tuner_tnf_5335mf_ntsc_ranges,
1081 .count = ARRAY_SIZE(tuner_tnf_5335mf_ntsc_ranges), 985 .count = ARRAY_SIZE(tuner_tnf_5335mf_ntsc_ranges),
1082 .config = 0x8e, 986 },
987 {
988 .type = TUNER_PARAM_TYPE_PAL,
989 .ranges = tuner_tnf_5335_d_if_pal_ranges,
990 .count = ARRAY_SIZE(tuner_tnf_5335_d_if_pal_ranges),
1083 }, 991 },
1084}; 992};
1085 993
@@ -1087,9 +995,9 @@ static struct tuner_params tuner_tnf_5335mf_params[] = {
1087/* ------------ TUNER_SAMSUNG_TCPN_2121P30A - Samsung NTSC ------------ */ 995/* ------------ TUNER_SAMSUNG_TCPN_2121P30A - Samsung NTSC ------------ */
1088 996
1089static struct tuner_range tuner_samsung_tcpn_2121p30a_ntsc_ranges[] = { 997static struct tuner_range tuner_samsung_tcpn_2121p30a_ntsc_ranges[] = {
1090 { 16 * 175.75 /*MHz*/, 0x01, }, 998 { 16 * 130.00 /*MHz*/, 0xce, 0x01, },
1091 { 16 * 410.25 /*MHz*/, 0x02, }, 999 { 16 * 364.50 /*MHz*/, 0xce, 0x02, },
1092 { 16 * 999.99 , 0x08, }, 1000 { 16 * 999.99 , 0xce, 0x08, },
1093}; 1001};
1094 1002
1095static struct tuner_params tuner_samsung_tcpn_2121p30a_params[] = { 1003static struct tuner_params tuner_samsung_tcpn_2121p30a_params[] = {
@@ -1097,7 +1005,22 @@ static struct tuner_params tuner_samsung_tcpn_2121p30a_params[] = {
1097 .type = TUNER_PARAM_TYPE_NTSC, 1005 .type = TUNER_PARAM_TYPE_NTSC,
1098 .ranges = tuner_samsung_tcpn_2121p30a_ntsc_ranges, 1006 .ranges = tuner_samsung_tcpn_2121p30a_ntsc_ranges,
1099 .count = ARRAY_SIZE(tuner_samsung_tcpn_2121p30a_ntsc_ranges), 1007 .count = ARRAY_SIZE(tuner_samsung_tcpn_2121p30a_ntsc_ranges),
1100 .config = 0xce, 1008 },
1009};
1010
1011/* ------------ TUNER_THOMSON_FE6600 - DViCO Hybrid PAL ------------ */
1012
1013static struct tuner_range tuner_thomson_fe6600_ranges[] = {
1014 { 16 * 160.00 /*MHz*/, 0xfe, 0x11, },
1015 { 16 * 442.00 /*MHz*/, 0xf6, 0x12, },
1016 { 16 * 999.99 , 0xf6, 0x18, },
1017};
1018
1019static struct tuner_params tuner_thomson_fe6600_params[] = {
1020 {
1021 .type = TUNER_PARAM_TYPE_PAL,
1022 .ranges = tuner_thomson_fe6600_ranges,
1023 .count = ARRAY_SIZE(tuner_thomson_fe6600_ranges),
1101 }, 1024 },
1102}; 1025};
1103 1026
@@ -1108,18 +1031,22 @@ struct tunertype tuners[] = {
1108 [TUNER_TEMIC_PAL] = { /* TEMIC PAL */ 1031 [TUNER_TEMIC_PAL] = { /* TEMIC PAL */
1109 .name = "Temic PAL (4002 FH5)", 1032 .name = "Temic PAL (4002 FH5)",
1110 .params = tuner_temic_pal_params, 1033 .params = tuner_temic_pal_params,
1034 .count = ARRAY_SIZE(tuner_temic_pal_params),
1111 }, 1035 },
1112 [TUNER_PHILIPS_PAL_I] = { /* Philips PAL_I */ 1036 [TUNER_PHILIPS_PAL_I] = { /* Philips PAL_I */
1113 .name = "Philips PAL_I (FI1246 and compatibles)", 1037 .name = "Philips PAL_I (FI1246 and compatibles)",
1114 .params = tuner_philips_pal_i_params, 1038 .params = tuner_philips_pal_i_params,
1039 .count = ARRAY_SIZE(tuner_philips_pal_i_params),
1115 }, 1040 },
1116 [TUNER_PHILIPS_NTSC] = { /* Philips NTSC */ 1041 [TUNER_PHILIPS_NTSC] = { /* Philips NTSC */
1117 .name = "Philips NTSC (FI1236,FM1236 and compatibles)", 1042 .name = "Philips NTSC (FI1236,FM1236 and compatibles)",
1118 .params = tuner_philips_ntsc_params, 1043 .params = tuner_philips_ntsc_params,
1044 .count = ARRAY_SIZE(tuner_philips_ntsc_params),
1119 }, 1045 },
1120 [TUNER_PHILIPS_SECAM] = { /* Philips SECAM */ 1046 [TUNER_PHILIPS_SECAM] = { /* Philips SECAM */
1121 .name = "Philips (SECAM+PAL_BG) (FI1216MF, FM1216MF, FR1216MF)", 1047 .name = "Philips (SECAM+PAL_BG) (FI1216MF, FM1216MF, FR1216MF)",
1122 .params = tuner_philips_secam_params, 1048 .params = tuner_philips_secam_params,
1049 .count = ARRAY_SIZE(tuner_philips_secam_params),
1123 }, 1050 },
1124 [TUNER_ABSENT] = { /* Tuner Absent */ 1051 [TUNER_ABSENT] = { /* Tuner Absent */
1125 .name = "NoTuner", 1052 .name = "NoTuner",
@@ -1127,120 +1054,148 @@ struct tunertype tuners[] = {
1127 [TUNER_PHILIPS_PAL] = { /* Philips PAL */ 1054 [TUNER_PHILIPS_PAL] = { /* Philips PAL */
1128 .name = "Philips PAL_BG (FI1216 and compatibles)", 1055 .name = "Philips PAL_BG (FI1216 and compatibles)",
1129 .params = tuner_philips_pal_params, 1056 .params = tuner_philips_pal_params,
1057 .count = ARRAY_SIZE(tuner_philips_pal_params),
1130 }, 1058 },
1131 [TUNER_TEMIC_NTSC] = { /* TEMIC NTSC */ 1059 [TUNER_TEMIC_NTSC] = { /* TEMIC NTSC */
1132 .name = "Temic NTSC (4032 FY5)", 1060 .name = "Temic NTSC (4032 FY5)",
1133 .params = tuner_temic_ntsc_params, 1061 .params = tuner_temic_ntsc_params,
1062 .count = ARRAY_SIZE(tuner_temic_ntsc_params),
1134 }, 1063 },
1135 [TUNER_TEMIC_PAL_I] = { /* TEMIC PAL_I */ 1064 [TUNER_TEMIC_PAL_I] = { /* TEMIC PAL_I */
1136 .name = "Temic PAL_I (4062 FY5)", 1065 .name = "Temic PAL_I (4062 FY5)",
1137 .params = tuner_temic_pal_i_params, 1066 .params = tuner_temic_pal_i_params,
1067 .count = ARRAY_SIZE(tuner_temic_pal_i_params),
1138 }, 1068 },
1139 [TUNER_TEMIC_4036FY5_NTSC] = { /* TEMIC NTSC */ 1069 [TUNER_TEMIC_4036FY5_NTSC] = { /* TEMIC NTSC */
1140 .name = "Temic NTSC (4036 FY5)", 1070 .name = "Temic NTSC (4036 FY5)",
1141 .params = tuner_temic_4036fy5_ntsc_params, 1071 .params = tuner_temic_4036fy5_ntsc_params,
1072 .count = ARRAY_SIZE(tuner_temic_4036fy5_ntsc_params),
1142 }, 1073 },
1143 [TUNER_ALPS_TSBH1_NTSC] = { /* TEMIC NTSC */ 1074 [TUNER_ALPS_TSBH1_NTSC] = { /* TEMIC NTSC */
1144 .name = "Alps HSBH1", 1075 .name = "Alps HSBH1",
1145 .params = tuner_alps_tsbh1_ntsc_params, 1076 .params = tuner_alps_tsbh1_ntsc_params,
1077 .count = ARRAY_SIZE(tuner_alps_tsbh1_ntsc_params),
1146 }, 1078 },
1147 1079
1148 /* 10-19 */ 1080 /* 10-19 */
1149 [TUNER_ALPS_TSBE1_PAL] = { /* TEMIC PAL */ 1081 [TUNER_ALPS_TSBE1_PAL] = { /* TEMIC PAL */
1150 .name = "Alps TSBE1", 1082 .name = "Alps TSBE1",
1151 .params = tuner_alps_tsb_1_params, 1083 .params = tuner_alps_tsb_1_params,
1084 .count = ARRAY_SIZE(tuner_alps_tsb_1_params),
1152 }, 1085 },
1153 [TUNER_ALPS_TSBB5_PAL_I] = { /* Alps PAL_I */ 1086 [TUNER_ALPS_TSBB5_PAL_I] = { /* Alps PAL_I */
1154 .name = "Alps TSBB5", 1087 .name = "Alps TSBB5",
1155 .params = tuner_alps_tsbb5_params, 1088 .params = tuner_alps_tsbb5_params,
1089 .count = ARRAY_SIZE(tuner_alps_tsbb5_params),
1156 }, 1090 },
1157 [TUNER_ALPS_TSBE5_PAL] = { /* Alps PAL */ 1091 [TUNER_ALPS_TSBE5_PAL] = { /* Alps PAL */
1158 .name = "Alps TSBE5", 1092 .name = "Alps TSBE5",
1159 .params = tuner_alps_tsbe5_params, 1093 .params = tuner_alps_tsbe5_params,
1094 .count = ARRAY_SIZE(tuner_alps_tsbe5_params),
1160 }, 1095 },
1161 [TUNER_ALPS_TSBC5_PAL] = { /* Alps PAL */ 1096 [TUNER_ALPS_TSBC5_PAL] = { /* Alps PAL */
1162 .name = "Alps TSBC5", 1097 .name = "Alps TSBC5",
1163 .params = tuner_alps_tsbc5_params, 1098 .params = tuner_alps_tsbc5_params,
1099 .count = ARRAY_SIZE(tuner_alps_tsbc5_params),
1164 }, 1100 },
1165 [TUNER_TEMIC_4006FH5_PAL] = { /* TEMIC PAL */ 1101 [TUNER_TEMIC_4006FH5_PAL] = { /* TEMIC PAL */
1166 .name = "Temic PAL_BG (4006FH5)", 1102 .name = "Temic PAL_BG (4006FH5)",
1167 .params = tuner_temic_4006fh5_params, 1103 .params = tuner_temic_4006fh5_params,
1104 .count = ARRAY_SIZE(tuner_temic_4006fh5_params),
1168 }, 1105 },
1169 [TUNER_ALPS_TSHC6_NTSC] = { /* Alps NTSC */ 1106 [TUNER_ALPS_TSHC6_NTSC] = { /* Alps NTSC */
1170 .name = "Alps TSCH6", 1107 .name = "Alps TSCH6",
1171 .params = tuner_alps_tshc6_params, 1108 .params = tuner_alps_tshc6_params,
1109 .count = ARRAY_SIZE(tuner_alps_tshc6_params),
1172 }, 1110 },
1173 [TUNER_TEMIC_PAL_DK] = { /* TEMIC PAL */ 1111 [TUNER_TEMIC_PAL_DK] = { /* TEMIC PAL */
1174 .name = "Temic PAL_DK (4016 FY5)", 1112 .name = "Temic PAL_DK (4016 FY5)",
1175 .params = tuner_temic_pal_dk_params, 1113 .params = tuner_temic_pal_dk_params,
1114 .count = ARRAY_SIZE(tuner_temic_pal_dk_params),
1176 }, 1115 },
1177 [TUNER_PHILIPS_NTSC_M] = { /* Philips NTSC */ 1116 [TUNER_PHILIPS_NTSC_M] = { /* Philips NTSC */
1178 .name = "Philips NTSC_M (MK2)", 1117 .name = "Philips NTSC_M (MK2)",
1179 .params = tuner_philips_ntsc_m_params, 1118 .params = tuner_philips_ntsc_m_params,
1119 .count = ARRAY_SIZE(tuner_philips_ntsc_m_params),
1180 }, 1120 },
1181 [TUNER_TEMIC_4066FY5_PAL_I] = { /* TEMIC PAL_I */ 1121 [TUNER_TEMIC_4066FY5_PAL_I] = { /* TEMIC PAL_I */
1182 .name = "Temic PAL_I (4066 FY5)", 1122 .name = "Temic PAL_I (4066 FY5)",
1183 .params = tuner_temic_4066fy5_pal_i_params, 1123 .params = tuner_temic_4066fy5_pal_i_params,
1124 .count = ARRAY_SIZE(tuner_temic_4066fy5_pal_i_params),
1184 }, 1125 },
1185 [TUNER_TEMIC_4006FN5_MULTI_PAL] = { /* TEMIC PAL */ 1126 [TUNER_TEMIC_4006FN5_MULTI_PAL] = { /* TEMIC PAL */
1186 .name = "Temic PAL* auto (4006 FN5)", 1127 .name = "Temic PAL* auto (4006 FN5)",
1187 .params = tuner_temic_4006fn5_multi_params, 1128 .params = tuner_temic_4006fn5_multi_params,
1129 .count = ARRAY_SIZE(tuner_temic_4006fn5_multi_params),
1188 }, 1130 },
1189 1131
1190 /* 20-29 */ 1132 /* 20-29 */
1191 [TUNER_TEMIC_4009FR5_PAL] = { /* TEMIC PAL */ 1133 [TUNER_TEMIC_4009FR5_PAL] = { /* TEMIC PAL */
1192 .name = "Temic PAL_BG (4009 FR5) or PAL_I (4069 FR5)", 1134 .name = "Temic PAL_BG (4009 FR5) or PAL_I (4069 FR5)",
1193 .params = tuner_temic_4009f_5_params, 1135 .params = tuner_temic_4009f_5_params,
1136 .count = ARRAY_SIZE(tuner_temic_4009f_5_params),
1194 }, 1137 },
1195 [TUNER_TEMIC_4039FR5_NTSC] = { /* TEMIC NTSC */ 1138 [TUNER_TEMIC_4039FR5_NTSC] = { /* TEMIC NTSC */
1196 .name = "Temic NTSC (4039 FR5)", 1139 .name = "Temic NTSC (4039 FR5)",
1197 .params = tuner_temic_4039fr5_params, 1140 .params = tuner_temic_4039fr5_params,
1141 .count = ARRAY_SIZE(tuner_temic_4039fr5_params),
1198 }, 1142 },
1199 [TUNER_TEMIC_4046FM5] = { /* TEMIC PAL */ 1143 [TUNER_TEMIC_4046FM5] = { /* TEMIC PAL */
1200 .name = "Temic PAL/SECAM multi (4046 FM5)", 1144 .name = "Temic PAL/SECAM multi (4046 FM5)",
1201 .params = tuner_temic_4046fm5_params, 1145 .params = tuner_temic_4046fm5_params,
1146 .count = ARRAY_SIZE(tuner_temic_4046fm5_params),
1202 }, 1147 },
1203 [TUNER_PHILIPS_PAL_DK] = { /* Philips PAL */ 1148 [TUNER_PHILIPS_PAL_DK] = { /* Philips PAL */
1204 .name = "Philips PAL_DK (FI1256 and compatibles)", 1149 .name = "Philips PAL_DK (FI1256 and compatibles)",
1205 .params = tuner_philips_pal_dk_params, 1150 .params = tuner_philips_pal_dk_params,
1151 .count = ARRAY_SIZE(tuner_philips_pal_dk_params),
1206 }, 1152 },
1207 [TUNER_PHILIPS_FQ1216ME] = { /* Philips PAL */ 1153 [TUNER_PHILIPS_FQ1216ME] = { /* Philips PAL */
1208 .name = "Philips PAL/SECAM multi (FQ1216ME)", 1154 .name = "Philips PAL/SECAM multi (FQ1216ME)",
1209 .params = tuner_philips_fq1216me_params, 1155 .params = tuner_philips_fq1216me_params,
1156 .count = ARRAY_SIZE(tuner_philips_fq1216me_params),
1210 }, 1157 },
1211 [TUNER_LG_PAL_I_FM] = { /* LGINNOTEK PAL_I */ 1158 [TUNER_LG_PAL_I_FM] = { /* LGINNOTEK PAL_I */
1212 .name = "LG PAL_I+FM (TAPC-I001D)", 1159 .name = "LG PAL_I+FM (TAPC-I001D)",
1213 .params = tuner_lg_pal_i_fm_params, 1160 .params = tuner_lg_pal_i_fm_params,
1161 .count = ARRAY_SIZE(tuner_lg_pal_i_fm_params),
1214 }, 1162 },
1215 [TUNER_LG_PAL_I] = { /* LGINNOTEK PAL_I */ 1163 [TUNER_LG_PAL_I] = { /* LGINNOTEK PAL_I */
1216 .name = "LG PAL_I (TAPC-I701D)", 1164 .name = "LG PAL_I (TAPC-I701D)",
1217 .params = tuner_lg_pal_i_params, 1165 .params = tuner_lg_pal_i_params,
1166 .count = ARRAY_SIZE(tuner_lg_pal_i_params),
1218 }, 1167 },
1219 [TUNER_LG_NTSC_FM] = { /* LGINNOTEK NTSC */ 1168 [TUNER_LG_NTSC_FM] = { /* LGINNOTEK NTSC */
1220 .name = "LG NTSC+FM (TPI8NSR01F)", 1169 .name = "LG NTSC+FM (TPI8NSR01F)",
1221 .params = tuner_lg_ntsc_fm_params, 1170 .params = tuner_lg_ntsc_fm_params,
1171 .count = ARRAY_SIZE(tuner_lg_ntsc_fm_params),
1222 }, 1172 },
1223 [TUNER_LG_PAL_FM] = { /* LGINNOTEK PAL */ 1173 [TUNER_LG_PAL_FM] = { /* LGINNOTEK PAL */
1224 .name = "LG PAL_BG+FM (TPI8PSB01D)", 1174 .name = "LG PAL_BG+FM (TPI8PSB01D)",
1225 .params = tuner_lg_pal_fm_params, 1175 .params = tuner_lg_pal_fm_params,
1176 .count = ARRAY_SIZE(tuner_lg_pal_fm_params),
1226 }, 1177 },
1227 [TUNER_LG_PAL] = { /* LGINNOTEK PAL */ 1178 [TUNER_LG_PAL] = { /* LGINNOTEK PAL */
1228 .name = "LG PAL_BG (TPI8PSB11D)", 1179 .name = "LG PAL_BG (TPI8PSB11D)",
1229 .params = tuner_lg_pal_params, 1180 .params = tuner_lg_pal_params,
1181 .count = ARRAY_SIZE(tuner_lg_pal_params),
1230 }, 1182 },
1231 1183
1232 /* 30-39 */ 1184 /* 30-39 */
1233 [TUNER_TEMIC_4009FN5_MULTI_PAL_FM] = { /* TEMIC PAL */ 1185 [TUNER_TEMIC_4009FN5_MULTI_PAL_FM] = { /* TEMIC PAL */
1234 .name = "Temic PAL* auto + FM (4009 FN5)", 1186 .name = "Temic PAL* auto + FM (4009 FN5)",
1235 .params = tuner_temic_4009_fn5_multi_pal_fm_params, 1187 .params = tuner_temic_4009_fn5_multi_pal_fm_params,
1188 .count = ARRAY_SIZE(tuner_temic_4009_fn5_multi_pal_fm_params),
1236 }, 1189 },
1237 [TUNER_SHARP_2U5JF5540_NTSC] = { /* SHARP NTSC */ 1190 [TUNER_SHARP_2U5JF5540_NTSC] = { /* SHARP NTSC */
1238 .name = "SHARP NTSC_JP (2U5JF5540)", 1191 .name = "SHARP NTSC_JP (2U5JF5540)",
1239 .params = tuner_sharp_2u5jf5540_params, 1192 .params = tuner_sharp_2u5jf5540_params,
1193 .count = ARRAY_SIZE(tuner_sharp_2u5jf5540_params),
1240 }, 1194 },
1241 [TUNER_Samsung_PAL_TCPM9091PD27] = { /* Samsung PAL */ 1195 [TUNER_Samsung_PAL_TCPM9091PD27] = { /* Samsung PAL */
1242 .name = "Samsung PAL TCPM9091PD27", 1196 .name = "Samsung PAL TCPM9091PD27",
1243 .params = tuner_samsung_pal_tcpm9091pd27_params, 1197 .params = tuner_samsung_pal_tcpm9091pd27_params,
1198 .count = ARRAY_SIZE(tuner_samsung_pal_tcpm9091pd27_params),
1244 }, 1199 },
1245 [TUNER_MT2032] = { /* Microtune PAL|NTSC */ 1200 [TUNER_MT2032] = { /* Microtune PAL|NTSC */
1246 .name = "MT20xx universal", 1201 .name = "MT20xx universal",
@@ -1248,86 +1203,106 @@ struct tunertype tuners[] = {
1248 [TUNER_TEMIC_4106FH5] = { /* TEMIC PAL */ 1203 [TUNER_TEMIC_4106FH5] = { /* TEMIC PAL */
1249 .name = "Temic PAL_BG (4106 FH5)", 1204 .name = "Temic PAL_BG (4106 FH5)",
1250 .params = tuner_temic_4106fh5_params, 1205 .params = tuner_temic_4106fh5_params,
1206 .count = ARRAY_SIZE(tuner_temic_4106fh5_params),
1251 }, 1207 },
1252 [TUNER_TEMIC_4012FY5] = { /* TEMIC PAL */ 1208 [TUNER_TEMIC_4012FY5] = { /* TEMIC PAL */
1253 .name = "Temic PAL_DK/SECAM_L (4012 FY5)", 1209 .name = "Temic PAL_DK/SECAM_L (4012 FY5)",
1254 .params = tuner_temic_4012fy5_params, 1210 .params = tuner_temic_4012fy5_params,
1211 .count = ARRAY_SIZE(tuner_temic_4012fy5_params),
1255 }, 1212 },
1256 [TUNER_TEMIC_4136FY5] = { /* TEMIC NTSC */ 1213 [TUNER_TEMIC_4136FY5] = { /* TEMIC NTSC */
1257 .name = "Temic NTSC (4136 FY5)", 1214 .name = "Temic NTSC (4136 FY5)",
1258 .params = tuner_temic_4136_fy5_params, 1215 .params = tuner_temic_4136_fy5_params,
1216 .count = ARRAY_SIZE(tuner_temic_4136_fy5_params),
1259 }, 1217 },
1260 [TUNER_LG_PAL_NEW_TAPC] = { /* LGINNOTEK PAL */ 1218 [TUNER_LG_PAL_NEW_TAPC] = { /* LGINNOTEK PAL */
1261 .name = "LG PAL (newer TAPC series)", 1219 .name = "LG PAL (newer TAPC series)",
1262 .params = tuner_lg_pal_new_tapc_params, 1220 .params = tuner_lg_pal_new_tapc_params,
1221 .count = ARRAY_SIZE(tuner_lg_pal_new_tapc_params),
1263 }, 1222 },
1264 [TUNER_PHILIPS_FM1216ME_MK3] = { /* Philips PAL */ 1223 [TUNER_PHILIPS_FM1216ME_MK3] = { /* Philips PAL */
1265 .name = "Philips PAL/SECAM multi (FM1216ME MK3)", 1224 .name = "Philips PAL/SECAM multi (FM1216ME MK3)",
1266 .params = tuner_fm1216me_mk3_params, 1225 .params = tuner_fm1216me_mk3_params,
1226 .count = ARRAY_SIZE(tuner_fm1216me_mk3_params),
1267 }, 1227 },
1268 [TUNER_LG_NTSC_NEW_TAPC] = { /* LGINNOTEK NTSC */ 1228 [TUNER_LG_NTSC_NEW_TAPC] = { /* LGINNOTEK NTSC */
1269 .name = "LG NTSC (newer TAPC series)", 1229 .name = "LG NTSC (newer TAPC series)",
1270 .params = tuner_lg_ntsc_new_tapc_params, 1230 .params = tuner_lg_ntsc_new_tapc_params,
1231 .count = ARRAY_SIZE(tuner_lg_ntsc_new_tapc_params),
1271 }, 1232 },
1272 1233
1273 /* 40-49 */ 1234 /* 40-49 */
1274 [TUNER_HITACHI_NTSC] = { /* HITACHI NTSC */ 1235 [TUNER_HITACHI_NTSC] = { /* HITACHI NTSC */
1275 .name = "HITACHI V7-J180AT", 1236 .name = "HITACHI V7-J180AT",
1276 .params = tuner_hitachi_ntsc_params, 1237 .params = tuner_hitachi_ntsc_params,
1238 .count = ARRAY_SIZE(tuner_hitachi_ntsc_params),
1277 }, 1239 },
1278 [TUNER_PHILIPS_PAL_MK] = { /* Philips PAL */ 1240 [TUNER_PHILIPS_PAL_MK] = { /* Philips PAL */
1279 .name = "Philips PAL_MK (FI1216 MK)", 1241 .name = "Philips PAL_MK (FI1216 MK)",
1280 .params = tuner_philips_pal_mk_params, 1242 .params = tuner_philips_pal_mk_params,
1243 .count = ARRAY_SIZE(tuner_philips_pal_mk_params),
1281 }, 1244 },
1282 [TUNER_PHILIPS_ATSC] = { /* Philips ATSC */ 1245 [TUNER_PHILIPS_ATSC] = { /* Philips ATSC */
1283 .name = "Philips 1236D ATSC/NTSC dual in", 1246 .name = "Philips 1236D ATSC/NTSC dual in",
1284 .params = tuner_philips_atsc_params, 1247 .params = tuner_philips_atsc_params,
1248 .count = ARRAY_SIZE(tuner_philips_atsc_params),
1285 }, 1249 },
1286 [TUNER_PHILIPS_FM1236_MK3] = { /* Philips NTSC */ 1250 [TUNER_PHILIPS_FM1236_MK3] = { /* Philips NTSC */
1287 .name = "Philips NTSC MK3 (FM1236MK3 or FM1236/F)", 1251 .name = "Philips NTSC MK3 (FM1236MK3 or FM1236/F)",
1288 .params = tuner_fm1236_mk3_params, 1252 .params = tuner_fm1236_mk3_params,
1253 .count = ARRAY_SIZE(tuner_fm1236_mk3_params),
1289 }, 1254 },
1290 [TUNER_PHILIPS_4IN1] = { /* Philips NTSC */ 1255 [TUNER_PHILIPS_4IN1] = { /* Philips NTSC */
1291 .name = "Philips 4 in 1 (ATI TV Wonder Pro/Conexant)", 1256 .name = "Philips 4 in 1 (ATI TV Wonder Pro/Conexant)",
1292 .params = tuner_philips_4in1_params, 1257 .params = tuner_philips_4in1_params,
1258 .count = ARRAY_SIZE(tuner_philips_4in1_params),
1293 }, 1259 },
1294 [TUNER_MICROTUNE_4049FM5] = { /* Microtune PAL */ 1260 [TUNER_MICROTUNE_4049FM5] = { /* Microtune PAL */
1295 .name = "Microtune 4049 FM5", 1261 .name = "Microtune 4049 FM5",
1296 .params = tuner_microtune_4049_fm5_params, 1262 .params = tuner_microtune_4049_fm5_params,
1263 .count = ARRAY_SIZE(tuner_microtune_4049_fm5_params),
1297 }, 1264 },
1298 [TUNER_PANASONIC_VP27] = { /* Panasonic NTSC */ 1265 [TUNER_PANASONIC_VP27] = { /* Panasonic NTSC */
1299 .name = "Panasonic VP27s/ENGE4324D", 1266 .name = "Panasonic VP27s/ENGE4324D",
1300 .params = tuner_panasonic_vp27_params, 1267 .params = tuner_panasonic_vp27_params,
1268 .count = ARRAY_SIZE(tuner_panasonic_vp27_params),
1301 }, 1269 },
1302 [TUNER_LG_NTSC_TAPE] = { /* LGINNOTEK NTSC */ 1270 [TUNER_LG_NTSC_TAPE] = { /* LGINNOTEK NTSC */
1303 .name = "LG NTSC (TAPE series)", 1271 .name = "LG NTSC (TAPE series)",
1304 .params = tuner_lg_ntsc_tape_params, 1272 .params = tuner_lg_ntsc_tape_params,
1273 .count = ARRAY_SIZE(tuner_lg_ntsc_tape_params),
1305 }, 1274 },
1306 [TUNER_TNF_8831BGFF] = { /* Philips PAL */ 1275 [TUNER_TNF_8831BGFF] = { /* Philips PAL */
1307 .name = "Tenna TNF 8831 BGFF)", 1276 .name = "Tenna TNF 8831 BGFF)",
1308 .params = tuner_tnf_8831bgff_params, 1277 .params = tuner_tnf_8831bgff_params,
1278 .count = ARRAY_SIZE(tuner_tnf_8831bgff_params),
1309 }, 1279 },
1310 [TUNER_MICROTUNE_4042FI5] = { /* Microtune NTSC */ 1280 [TUNER_MICROTUNE_4042FI5] = { /* Microtune NTSC */
1311 .name = "Microtune 4042 FI5 ATSC/NTSC dual in", 1281 .name = "Microtune 4042 FI5 ATSC/NTSC dual in",
1312 .params = tuner_microtune_4042fi5_params, 1282 .params = tuner_microtune_4042fi5_params,
1283 .count = ARRAY_SIZE(tuner_microtune_4042fi5_params),
1313 }, 1284 },
1314 1285
1315 /* 50-59 */ 1286 /* 50-59 */
1316 [TUNER_TCL_2002N] = { /* TCL NTSC */ 1287 [TUNER_TCL_2002N] = { /* TCL NTSC */
1317 .name = "TCL 2002N", 1288 .name = "TCL 2002N",
1318 .params = tuner_tcl_2002n_params, 1289 .params = tuner_tcl_2002n_params,
1290 .count = ARRAY_SIZE(tuner_tcl_2002n_params),
1319 }, 1291 },
1320 [TUNER_PHILIPS_FM1256_IH3] = { /* Philips PAL */ 1292 [TUNER_PHILIPS_FM1256_IH3] = { /* Philips PAL */
1321 .name = "Philips PAL/SECAM_D (FM 1256 I-H3)", 1293 .name = "Philips PAL/SECAM_D (FM 1256 I-H3)",
1322 .params = tuner_philips_fm1256_ih3_params, 1294 .params = tuner_philips_fm1256_ih3_params,
1295 .count = ARRAY_SIZE(tuner_philips_fm1256_ih3_params),
1323 }, 1296 },
1324 [TUNER_THOMSON_DTT7610] = { /* THOMSON ATSC */ 1297 [TUNER_THOMSON_DTT7610] = { /* THOMSON ATSC */
1325 .name = "Thomson DTT 7610 (ATSC/NTSC)", 1298 .name = "Thomson DTT 7610 (ATSC/NTSC)",
1326 .params = tuner_thomson_dtt7610_params, 1299 .params = tuner_thomson_dtt7610_params,
1300 .count = ARRAY_SIZE(tuner_thomson_dtt7610_params),
1327 }, 1301 },
1328 [TUNER_PHILIPS_FQ1286] = { /* Philips NTSC */ 1302 [TUNER_PHILIPS_FQ1286] = { /* Philips NTSC */
1329 .name = "Philips FQ1286", 1303 .name = "Philips FQ1286",
1330 .params = tuner_philips_fq1286_params, 1304 .params = tuner_philips_fq1286_params,
1305 .count = ARRAY_SIZE(tuner_philips_fq1286_params),
1331 }, 1306 },
1332 [TUNER_PHILIPS_TDA8290] = { /* Philips PAL|NTSC */ 1307 [TUNER_PHILIPS_TDA8290] = { /* Philips PAL|NTSC */
1333 .name = "tda8290+75", 1308 .name = "tda8290+75",
@@ -1335,22 +1310,27 @@ struct tunertype tuners[] = {
1335 [TUNER_TCL_2002MB] = { /* TCL PAL */ 1310 [TUNER_TCL_2002MB] = { /* TCL PAL */
1336 .name = "TCL 2002MB", 1311 .name = "TCL 2002MB",
1337 .params = tuner_tcl_2002mb_params, 1312 .params = tuner_tcl_2002mb_params,
1313 .count = ARRAY_SIZE(tuner_tcl_2002mb_params),
1338 }, 1314 },
1339 [TUNER_PHILIPS_FQ1216AME_MK4] = { /* Philips PAL */ 1315 [TUNER_PHILIPS_FQ1216AME_MK4] = { /* Philips PAL */
1340 .name = "Philips PAL/SECAM multi (FQ1216AME MK4)", 1316 .name = "Philips PAL/SECAM multi (FQ1216AME MK4)",
1341 .params = tuner_philips_fq1216ame_mk4_params, 1317 .params = tuner_philips_fq1216ame_mk4_params,
1318 .count = ARRAY_SIZE(tuner_philips_fq1216ame_mk4_params),
1342 }, 1319 },
1343 [TUNER_PHILIPS_FQ1236A_MK4] = { /* Philips NTSC */ 1320 [TUNER_PHILIPS_FQ1236A_MK4] = { /* Philips NTSC */
1344 .name = "Philips FQ1236A MK4", 1321 .name = "Philips FQ1236A MK4",
1345 .params = tuner_philips_fq1236a_mk4_params, 1322 .params = tuner_philips_fq1236a_mk4_params,
1323 .count = ARRAY_SIZE(tuner_philips_fq1236a_mk4_params),
1346 }, 1324 },
1347 [TUNER_YMEC_TVF_8531MF] = { /* Philips NTSC */ 1325 [TUNER_YMEC_TVF_8531MF] = { /* Philips NTSC */
1348 .name = "Ymec TVision TVF-8531MF/8831MF/8731MF", 1326 .name = "Ymec TVision TVF-8531MF/8831MF/8731MF",
1349 .params = tuner_ymec_tvf_8531mf_params, 1327 .params = tuner_ymec_tvf_8531mf_params,
1328 .count = ARRAY_SIZE(tuner_ymec_tvf_8531mf_params),
1350 }, 1329 },
1351 [TUNER_YMEC_TVF_5533MF] = { /* Philips NTSC */ 1330 [TUNER_YMEC_TVF_5533MF] = { /* Philips NTSC */
1352 .name = "Ymec TVision TVF-5533MF", 1331 .name = "Ymec TVision TVF-5533MF",
1353 .params = tuner_ymec_tvf_5533mf_params, 1332 .params = tuner_ymec_tvf_5533mf_params,
1333 .count = ARRAY_SIZE(tuner_ymec_tvf_5533mf_params),
1354 }, 1334 },
1355 1335
1356 /* 60-69 */ 1336 /* 60-69 */
@@ -1358,10 +1338,12 @@ struct tunertype tuners[] = {
1358 /* DTT 7611 7611A 7612 7613 7613A 7614 7615 7615A */ 1338 /* DTT 7611 7611A 7612 7613 7613A 7614 7615 7615A */
1359 .name = "Thomson DTT 761X (ATSC/NTSC)", 1339 .name = "Thomson DTT 761X (ATSC/NTSC)",
1360 .params = tuner_thomson_dtt761x_params, 1340 .params = tuner_thomson_dtt761x_params,
1341 .count = ARRAY_SIZE(tuner_thomson_dtt761x_params),
1361 }, 1342 },
1362 [TUNER_TENA_9533_DI] = { /* Philips PAL */ 1343 [TUNER_TENA_9533_DI] = { /* Philips PAL */
1363 .name = "Tena TNF9533-D/IF/TNF9533-B/DF", 1344 .name = "Tena TNF9533-D/IF/TNF9533-B/DF",
1364 .params = tuner_tena_9533_di_params, 1345 .params = tuner_tena_9533_di_params,
1346 .count = ARRAY_SIZE(tuner_tena_9533_di_params),
1365 }, 1347 },
1366 [TUNER_TEA5767] = { /* Philips RADIO */ 1348 [TUNER_TEA5767] = { /* Philips RADIO */
1367 .name = "Philips TEA5767HN FM Radio", 1349 .name = "Philips TEA5767HN FM Radio",
@@ -1369,37 +1351,54 @@ struct tunertype tuners[] = {
1369 }, 1351 },
1370 [TUNER_PHILIPS_FMD1216ME_MK3] = { /* Philips PAL */ 1352 [TUNER_PHILIPS_FMD1216ME_MK3] = { /* Philips PAL */
1371 .name = "Philips FMD1216ME MK3 Hybrid Tuner", 1353 .name = "Philips FMD1216ME MK3 Hybrid Tuner",
1372 .params = tuner_tuner_philips_fmd1216me_mk3_params, 1354 .params = tuner_philips_fmd1216me_mk3_params,
1355 .count = ARRAY_SIZE(tuner_philips_fmd1216me_mk3_params),
1373 }, 1356 },
1374 [TUNER_LG_TDVS_H062F] = { /* LGINNOTEK ATSC */ 1357 [TUNER_LG_TDVS_H062F] = { /* LGINNOTEK ATSC */
1375 .name = "LG TDVS-H062F/TUA6034", 1358 .name = "LG TDVS-H062F/TUA6034",
1376 .params = tuner_tua6034_params, 1359 .params = tuner_tua6034_params,
1360 .count = ARRAY_SIZE(tuner_tua6034_params),
1377 }, 1361 },
1378 [TUNER_YMEC_TVF66T5_B_DFF] = { /* Philips PAL */ 1362 [TUNER_YMEC_TVF66T5_B_DFF] = { /* Philips PAL */
1379 .name = "Ymec TVF66T5-B/DFF", 1363 .name = "Ymec TVF66T5-B/DFF",
1380 .params = tuner_ymec_tvf66t5_b_dff_params, 1364 .params = tuner_ymec_tvf66t5_b_dff_params,
1365 .count = ARRAY_SIZE(tuner_ymec_tvf66t5_b_dff_params),
1381 }, 1366 },
1382 [TUNER_LG_NTSC_TALN_MINI] = { /* LGINNOTEK NTSC */ 1367 [TUNER_LG_TALN] = { /* LGINNOTEK NTSC / PAL / SECAM */
1383 .name = "LG NTSC (TALN mini series)", 1368 .name = "LG TALN series",
1384 .params = tuner_lg_taln_mini_params, 1369 .params = tuner_lg_taln_params,
1370 .count = ARRAY_SIZE(tuner_lg_taln_params),
1385 }, 1371 },
1386 [TUNER_PHILIPS_TD1316] = { /* Philips PAL */ 1372 [TUNER_PHILIPS_TD1316] = { /* Philips PAL */
1387 .name = "Philips TD1316 Hybrid Tuner", 1373 .name = "Philips TD1316 Hybrid Tuner",
1388 .params = tuner_philips_td1316_params, 1374 .params = tuner_philips_td1316_params,
1375 .count = ARRAY_SIZE(tuner_philips_td1316_params),
1389 }, 1376 },
1390 [TUNER_PHILIPS_TUV1236D] = { /* Philips ATSC */ 1377 [TUNER_PHILIPS_TUV1236D] = { /* Philips ATSC */
1391 .name = "Philips TUV1236D ATSC/NTSC dual in", 1378 .name = "Philips TUV1236D ATSC/NTSC dual in",
1392 .params = tuner_tuner_tuv1236d_params, 1379 .params = tuner_tuv1236d_params,
1380 .count = ARRAY_SIZE(tuner_tuv1236d_params),
1393 }, 1381 },
1394 [TUNER_TNF_5335MF] = { /* Philips NTSC */ 1382 [TUNER_TNF_5335MF] = { /* Tenna PAL/NTSC */
1395 .name = "Tena TNF 5335 MF", 1383 .name = "Tena TNF 5335 and similar models",
1396 .params = tuner_tnf_5335mf_params, 1384 .params = tuner_tnf_5335mf_params,
1385 .count = ARRAY_SIZE(tuner_tnf_5335mf_params),
1397 }, 1386 },
1398 1387
1399 /* 70-79 */ 1388 /* 70-79 */
1400 [TUNER_SAMSUNG_TCPN_2121P30A] = { /* Samsung NTSC */ 1389 [TUNER_SAMSUNG_TCPN_2121P30A] = { /* Samsung NTSC */
1401 .name = "Samsung TCPN 2121P30A", 1390 .name = "Samsung TCPN 2121P30A",
1402 .params = tuner_samsung_tcpn_2121p30a_params, 1391 .params = tuner_samsung_tcpn_2121p30a_params,
1392 .count = ARRAY_SIZE(tuner_samsung_tcpn_2121p30a_params),
1393 },
1394 [TUNER_XCEIVE_XC3028] = { /* Xceive 3028 */
1395 .name = "Xceive xc3028",
1396 /* see xc3028.c for details */
1397 },
1398 [TUNER_THOMSON_FE6600] = { /* Thomson PAL / DVB-T */
1399 .name = "Thomson FE6600",
1400 .params = tuner_thomson_fe6600_params,
1401 .count = ARRAY_SIZE(tuner_thomson_fe6600_params),
1403 }, 1402 },
1404}; 1403};
1405 1404
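The tuner-types hunks above move the per-mode .config byte into each tuner_range entry (so every band row now carries its own control byte alongside the band-switch byte) and add explicit .count fields to the tuners[] table. As a minimal sketch of how such a per-range table could be walked to pick the bytes for a given frequency, assuming the field order shown in the initializers ({ limit, config, cb }); the pick_range() helper below is illustrative only and not a symbol from the driver:

```c
/* Sketch only: field names follow the initializer layout seen in the
 * diff; pick_range() is a hypothetical helper, not kernel code. */
struct tuner_range {
	unsigned short limit;	/* upper band edge, in units of 1/16 MHz */
	unsigned char  config;	/* per-range control byte (was per-params) */
	unsigned char  cb;	/* band-switch byte */
};

static struct tuner_range example_ntsc_ranges[] = {
	{ 16 * 157.25 /*MHz*/, 0x8e, 0x01, },
	{ 16 * 454.00 /*MHz*/, 0x8e, 0x02, },
	{ 16 * 999.99         , 0x8e, 0x04, },
};

/* Return the first range whose limit covers freq (freq in 1/16 MHz). */
static struct tuner_range *pick_range(struct tuner_range *r, int count,
				      unsigned short freq)
{
	int i;

	for (i = 0; i < count - 1; i++)
		if (freq <= r[i].limit)
			break;
	return &r[i];	/* last entry acts as the catch-all band */
}
```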
diff --git a/drivers/media/video/tvaudio.c b/drivers/media/video/tvaudio.c
index c8e5ad0e8185..4efb01bb44ac 100644
--- a/drivers/media/video/tvaudio.c
+++ b/drivers/media/video/tvaudio.c
@@ -130,6 +130,7 @@ struct CHIPSTATE {
130 struct timer_list wt; 130 struct timer_list wt;
131 int done; 131 int done;
132 int watch_stereo; 132 int watch_stereo;
133 int audmode;
133}; 134};
134 135
135/* ---------------------------------------------------------------------- */ 136/* ---------------------------------------------------------------------- */
@@ -1514,6 +1515,7 @@ static int chip_attach(struct i2c_adapter *adap, int addr, int kind)
1514 chip->type = desc-chiplist; 1515 chip->type = desc-chiplist;
1515 chip->shadow.count = desc->registers+1; 1516 chip->shadow.count = desc->registers+1;
1516 chip->prevmode = -1; 1517 chip->prevmode = -1;
1518 chip->audmode = V4L2_TUNER_MODE_LANG1;
1517 /* register */ 1519 /* register */
1518 i2c_attach_client(&chip->c); 1520 i2c_attach_client(&chip->c);
1519 1521
@@ -1671,6 +1673,8 @@ static int chip_command(struct i2c_client *client,
1671 struct v4l2_tuner *vt = arg; 1673 struct v4l2_tuner *vt = arg;
1672 int mode = 0; 1674 int mode = 0;
1673 1675
1676 if (chip->radio)
1677 break;
1674 switch (vt->audmode) { 1678 switch (vt->audmode) {
1675 case V4L2_TUNER_MODE_MONO: 1679 case V4L2_TUNER_MODE_MONO:
1676 mode = VIDEO_SOUND_MONO; 1680 mode = VIDEO_SOUND_MONO;
@@ -1685,8 +1689,9 @@ static int chip_command(struct i2c_client *client,
1685 mode = VIDEO_SOUND_LANG2; 1689 mode = VIDEO_SOUND_LANG2;
1686 break; 1690 break;
1687 default: 1691 default:
1688 break; 1692 return -EINVAL;
1689 } 1693 }
1694 chip->audmode = vt->audmode;
1690 1695
1691 if (desc->setmode && mode) { 1696 if (desc->setmode && mode) {
1692 chip->watch_stereo = 0; 1697 chip->watch_stereo = 0;
@@ -1704,7 +1709,7 @@ static int chip_command(struct i2c_client *client,
1704 1709
1705 if (chip->radio) 1710 if (chip->radio)
1706 break; 1711 break;
1707 vt->audmode = 0; 1712 vt->audmode = chip->audmode;
1708 vt->rxsubchans = 0; 1713 vt->rxsubchans = 0;
1709 vt->capability = V4L2_TUNER_CAP_STEREO | 1714 vt->capability = V4L2_TUNER_CAP_STEREO |
1710 V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2; 1715 V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2;
@@ -1716,19 +1721,12 @@ static int chip_command(struct i2c_client *client,
1716 vt->rxsubchans |= V4L2_TUNER_SUB_MONO; 1721 vt->rxsubchans |= V4L2_TUNER_SUB_MONO;
1717 if (mode & VIDEO_SOUND_STEREO) 1722 if (mode & VIDEO_SOUND_STEREO)
1718 vt->rxsubchans |= V4L2_TUNER_SUB_STEREO; 1723 vt->rxsubchans |= V4L2_TUNER_SUB_STEREO;
1724 /* Note: for SAP it should be mono/lang2 or stereo/lang2.
1725 When this module is converted fully to v4l2, then this
1726 should change for those chips that can detect SAP. */
1719 if (mode & VIDEO_SOUND_LANG1) 1727 if (mode & VIDEO_SOUND_LANG1)
1720 vt->rxsubchans |= V4L2_TUNER_SUB_LANG1 | 1728 vt->rxsubchans = V4L2_TUNER_SUB_LANG1 |
1721 V4L2_TUNER_SUB_LANG2; 1729 V4L2_TUNER_SUB_LANG2;
1722
1723 mode = chip->mode;
1724 if (mode & VIDEO_SOUND_MONO)
1725 vt->audmode = V4L2_TUNER_MODE_MONO;
1726 if (mode & VIDEO_SOUND_STEREO)
1727 vt->audmode = V4L2_TUNER_MODE_STEREO;
1728 if (mode & VIDEO_SOUND_LANG1)
1729 vt->audmode = V4L2_TUNER_MODE_LANG1;
1730 if (mode & VIDEO_SOUND_LANG2)
1731 vt->audmode = V4L2_TUNER_MODE_LANG2;
1732 break; 1730 break;
1733 } 1731 }
1734 1732
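The tvaudio.c hunk above adds an audmode field to the chip state (initialized to LANG1 at attach time), rejects unknown audio modes with -EINVAL, and makes the get path report the last requested mode instead of re-deriving it from the detected signal. A small sketch of that request-caching pattern, with illustrative names only (none of these symbols are the driver's own):

```c
/* Sketch of the set/get audmode caching introduced above; hypothetical
 * types and helpers, shown only to illustrate the control flow. */
#include <errno.h>

enum audmode { MODE_MONO, MODE_STEREO, MODE_LANG1, MODE_LANG2 };

struct chip_state {
	int radio;		/* non-zero while tuned to FM radio */
	enum audmode audmode;	/* last mode requested by user space */
};

static int set_audmode(struct chip_state *chip, enum audmode mode)
{
	if (chip->radio)	/* TV audio modes do not apply to radio */
		return 0;
	switch (mode) {
	case MODE_MONO:
	case MODE_STEREO:
	case MODE_LANG1:
	case MODE_LANG2:
		chip->audmode = mode;	/* remember what was asked for */
		return 0;
	default:
		return -EINVAL;		/* unknown modes are rejected */
	}
}

static enum audmode get_audmode(const struct chip_state *chip)
{
	/* Report the cached request, not the currently detected signal. */
	return chip->audmode;
}
```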
diff --git a/drivers/media/video/tvp5150.c b/drivers/media/video/tvp5150.c
index 1864423b3046..69d0fe159f4d 100644
--- a/drivers/media/video/tvp5150.c
+++ b/drivers/media/video/tvp5150.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * tvp5150 - Texas Instruments TVP5150A(M) video decoder driver 2 * tvp5150 - Texas Instruments TVP5150A/AM1 video decoder driver
3 * 3 *
4 * Copyright (c) 2005 Mauro Carvalho Chehab (mchehab@brturbo.com.br) 4 * Copyright (c) 2005,2006 Mauro Carvalho Chehab (mchehab@infradead.org)
5 * This code is placed under the terms of the GNU General Public License 5 * This code is placed under the terms of the GNU General Public License v2
6 */ 6 */
7 7
8#include <linux/i2c.h> 8#include <linux/i2c.h>
@@ -13,10 +13,11 @@
13 13
14#include "tvp5150_reg.h" 14#include "tvp5150_reg.h"
15 15
16MODULE_DESCRIPTION("Texas Instruments TVP5150A video decoder driver"); /* standard i2c insmod options */ 16MODULE_DESCRIPTION("Texas Instruments TVP5150A video decoder driver");
17MODULE_AUTHOR("Mauro Carvalho Chehab"); 17MODULE_AUTHOR("Mauro Carvalho Chehab");
18MODULE_LICENSE("GPL"); 18MODULE_LICENSE("GPL");
19 19
20/* standard i2c insmod options */
20static unsigned short normal_i2c[] = { 21static unsigned short normal_i2c[] = {
21 0xb8 >> 1, 22 0xb8 >> 1,
22 0xba >> 1, 23 0xba >> 1,
@@ -29,6 +30,9 @@ static int debug = 0;
29module_param(debug, int, 0); 30module_param(debug, int, 0);
30MODULE_PARM_DESC(debug, "Debug level (0-1)"); 31MODULE_PARM_DESC(debug, "Debug level (0-1)");
31 32
33#define tvp5150_err(fmt, arg...) do { \
34 printk(KERN_ERR "%s %d-%04x: " fmt, c->driver->driver.name, \
35 i2c_adapter_id(c->adapter), c->addr , ## arg); } while (0)
32#define tvp5150_info(fmt, arg...) do { \ 36#define tvp5150_info(fmt, arg...) do { \
33 printk(KERN_INFO "%s %d-%04x: " fmt, c->driver->driver.name, \ 37 printk(KERN_INFO "%s %d-%04x: " fmt, c->driver->driver.name, \
34 i2c_adapter_id(c->adapter), c->addr , ## arg); } while (0) 38 i2c_adapter_id(c->adapter), c->addr , ## arg); } while (0)
@@ -84,7 +88,7 @@ static struct v4l2_queryctrl tvp5150_qctrl[] = {
84struct tvp5150 { 88struct tvp5150 {
85 struct i2c_client *client; 89 struct i2c_client *client;
86 90
87 int norm; 91 v4l2_std_id norm; /* Current set standard */
88 int input; 92 int input;
89 int enable; 93 int enable;
90 int bright; 94 int bright;
@@ -125,310 +129,155 @@ static inline void tvp5150_write(struct i2c_client *c, unsigned char addr,
125 tvp5150_dbg(0, "i2c i/o error: rc == %d (should be 2)\n", rc); 129 tvp5150_dbg(0, "i2c i/o error: rc == %d (should be 2)\n", rc);
126} 130}
127 131
132static void dump_reg_range(struct i2c_client *c, char *s, u8 init, const u8 end,int max_line)
133{
134 int i=0;
135
136 while (init!=(u8)(end+1)) {
137 if ((i%max_line) == 0) {
138 if (i>0)
139 printk("\n");
140 printk("tvp5150: %s reg 0x%02x = ",s,init);
141 }
142 printk("%02x ",tvp5150_read(c, init));
143
144 init++;
145 i++;
146 }
147 printk("\n");
148}
149
128static void dump_reg(struct i2c_client *c) 150static void dump_reg(struct i2c_client *c)
129{ 151{
130 printk("tvp5150: Video input source selection #1 = 0x%02x\n", 152 printk("tvp5150: Video input source selection #1 = 0x%02x\n",
131 tvp5150_read(c, TVP5150_VD_IN_SRC_SEL_1)); 153 tvp5150_read(c, TVP5150_VD_IN_SRC_SEL_1));
132 printk("tvp5150: Analog channel controls = 0x%02x\n", 154 printk("tvp5150: Analog channel controls = 0x%02x\n",
133 tvp5150_read(c, TVP5150_ANAL_CHL_CTL)); 155 tvp5150_read(c, TVP5150_ANAL_CHL_CTL));
134 printk("tvp5150: Operation mode controls = 0x%02x\n", 156 printk("tvp5150: Operation mode controls = 0x%02x\n",
135 tvp5150_read(c, TVP5150_OP_MODE_CTL)); 157 tvp5150_read(c, TVP5150_OP_MODE_CTL));
136 printk("tvp5150: Miscellaneous controls = 0x%02x\n", 158 printk("tvp5150: Miscellaneous controls = 0x%02x\n",
137 tvp5150_read(c, TVP5150_MISC_CTL)); 159 tvp5150_read(c, TVP5150_MISC_CTL));
138 printk("tvp5150: Autoswitch mask: TVP5150A / TVP5150AM = 0x%02x\n", 160 printk("tvp5150: Autoswitch mask= 0x%02x\n",
139 tvp5150_read(c, TVP5150_AUTOSW_MSK)); 161 tvp5150_read(c, TVP5150_AUTOSW_MSK));
140 printk("tvp5150: Color killer threshold control = 0x%02x\n", 162 printk("tvp5150: Color killer threshold control = 0x%02x\n",
141 tvp5150_read(c, TVP5150_COLOR_KIL_THSH_CTL)); 163 tvp5150_read(c, TVP5150_COLOR_KIL_THSH_CTL));
142 printk("tvp5150: Luminance processing control #1 = 0x%02x\n", 164 printk("tvp5150: Luminance processing controls #1 #2 and #3 = %02x %02x %02x\n",
143 tvp5150_read(c, TVP5150_LUMA_PROC_CTL_1)); 165 tvp5150_read(c, TVP5150_LUMA_PROC_CTL_1),
144 printk("tvp5150: Luminance processing control #2 = 0x%02x\n", 166 tvp5150_read(c, TVP5150_LUMA_PROC_CTL_2),
145 tvp5150_read(c, TVP5150_LUMA_PROC_CTL_2)); 167 tvp5150_read(c, TVP5150_LUMA_PROC_CTL_3));
146 printk("tvp5150: Brightness control = 0x%02x\n", 168 printk("tvp5150: Brightness control = 0x%02x\n",
147 tvp5150_read(c, TVP5150_BRIGHT_CTL)); 169 tvp5150_read(c, TVP5150_BRIGHT_CTL));
148 printk("tvp5150: Color saturation control = 0x%02x\n", 170 printk("tvp5150: Color saturation control = 0x%02x\n",
149 tvp5150_read(c, TVP5150_SATURATION_CTL)); 171 tvp5150_read(c, TVP5150_SATURATION_CTL));
150 printk("tvp5150: Hue control = 0x%02x\n", 172 printk("tvp5150: Hue control = 0x%02x\n",
151 tvp5150_read(c, TVP5150_HUE_CTL)); 173 tvp5150_read(c, TVP5150_HUE_CTL));
152 printk("tvp5150: Contrast control = 0x%02x\n", 174 printk("tvp5150: Contrast control = 0x%02x\n",
153 tvp5150_read(c, TVP5150_CONTRAST_CTL)); 175 tvp5150_read(c, TVP5150_CONTRAST_CTL));
154 printk("tvp5150: Outputs and data rates select = 0x%02x\n", 176 printk("tvp5150: Outputs and data rates select = 0x%02x\n",
155 tvp5150_read(c, TVP5150_DATA_RATE_SEL)); 177 tvp5150_read(c, TVP5150_DATA_RATE_SEL));
156 printk("tvp5150: Luminance processing control #3 = 0x%02x\n",
157 tvp5150_read(c, TVP5150_LUMA_PROC_CTL_3));
158 printk("tvp5150: Configuration shared pins = 0x%02x\n", 178 printk("tvp5150: Configuration shared pins = 0x%02x\n",
159 tvp5150_read(c, TVP5150_CONF_SHARED_PIN)); 179 tvp5150_read(c, TVP5150_CONF_SHARED_PIN));
160 printk("tvp5150: Active video cropping start MSB = 0x%02x\n", 180 printk("tvp5150: Active video cropping start = 0x%02x%02x\n",
161 tvp5150_read(c, TVP5150_ACT_VD_CROP_ST_MSB)); 181 tvp5150_read(c, TVP5150_ACT_VD_CROP_ST_MSB),
162 printk("tvp5150: Active video cropping start LSB = 0x%02x\n", 182 tvp5150_read(c, TVP5150_ACT_VD_CROP_ST_LSB));
163 tvp5150_read(c, TVP5150_ACT_VD_CROP_ST_LSB)); 183 printk("tvp5150: Active video cropping stop = 0x%02x%02x\n",
164 printk("tvp5150: Active video cropping stop MSB = 0x%02x\n", 184 tvp5150_read(c, TVP5150_ACT_VD_CROP_STP_MSB),
165 tvp5150_read(c, TVP5150_ACT_VD_CROP_STP_MSB)); 185 tvp5150_read(c, TVP5150_ACT_VD_CROP_STP_LSB));
166 printk("tvp5150: Active video cropping stop LSB = 0x%02x\n",
167 tvp5150_read(c, TVP5150_ACT_VD_CROP_STP_LSB));
168 printk("tvp5150: Genlock/RTC = 0x%02x\n", 186 printk("tvp5150: Genlock/RTC = 0x%02x\n",
169 tvp5150_read(c, TVP5150_GENLOCK)); 187 tvp5150_read(c, TVP5150_GENLOCK));
170 printk("tvp5150: Horizontal sync start = 0x%02x\n", 188 printk("tvp5150: Horizontal sync start = 0x%02x\n",
171 tvp5150_read(c, TVP5150_HORIZ_SYNC_START)); 189 tvp5150_read(c, TVP5150_HORIZ_SYNC_START));
172 printk("tvp5150: Vertical blanking start = 0x%02x\n", 190 printk("tvp5150: Vertical blanking start = 0x%02x\n",
173 tvp5150_read(c, TVP5150_VERT_BLANKING_START)); 191 tvp5150_read(c, TVP5150_VERT_BLANKING_START));
174 printk("tvp5150: Vertical blanking stop = 0x%02x\n", 192 printk("tvp5150: Vertical blanking stop = 0x%02x\n",
175 tvp5150_read(c, TVP5150_VERT_BLANKING_STOP)); 193 tvp5150_read(c, TVP5150_VERT_BLANKING_STOP));
176 printk("tvp5150: Chrominance processing control #1 = 0x%02x\n", 194 printk("tvp5150: Chrominance processing control #1 and #2 = %02x %02x\n",
177 tvp5150_read(c, TVP5150_CHROMA_PROC_CTL_1)); 195 tvp5150_read(c, TVP5150_CHROMA_PROC_CTL_1),
178 printk("tvp5150: Chrominance processing control #2 = 0x%02x\n", 196 tvp5150_read(c, TVP5150_CHROMA_PROC_CTL_2));
179 tvp5150_read(c, TVP5150_CHROMA_PROC_CTL_2));
180 printk("tvp5150: Interrupt reset register B = 0x%02x\n", 197 printk("tvp5150: Interrupt reset register B = 0x%02x\n",
181 tvp5150_read(c, TVP5150_INT_RESET_REG_B)); 198 tvp5150_read(c, TVP5150_INT_RESET_REG_B));
182 printk("tvp5150: Interrupt enable register B = 0x%02x\n", 199 printk("tvp5150: Interrupt enable register B = 0x%02x\n",
183 tvp5150_read(c, TVP5150_INT_ENABLE_REG_B)); 200 tvp5150_read(c, TVP5150_INT_ENABLE_REG_B));
184 printk("tvp5150: Interrupt configuration register B = 0x%02x\n", 201 printk("tvp5150: Interrupt configuration register B = 0x%02x\n",
185 tvp5150_read(c, TVP5150_INTT_CONFIG_REG_B)); 202 tvp5150_read(c, TVP5150_INTT_CONFIG_REG_B));
186 printk("tvp5150: Video standard = 0x%02x\n", 203 printk("tvp5150: Video standard = 0x%02x\n",
187 tvp5150_read(c, TVP5150_VIDEO_STD)); 204 tvp5150_read(c, TVP5150_VIDEO_STD));
188 printk("tvp5150: Cb gain factor = 0x%02x\n", 205 printk("tvp5150: Chroma gain factor: Cb=0x%02x Cr=0x%02x\n",
189 tvp5150_read(c, TVP5150_CB_GAIN_FACT)); 206 tvp5150_read(c, TVP5150_CB_GAIN_FACT),
190 printk("tvp5150: Cr gain factor = 0x%02x\n", 207 tvp5150_read(c, TVP5150_CR_GAIN_FACTOR));
191 tvp5150_read(c, TVP5150_CR_GAIN_FACTOR));
192 printk("tvp5150: Macrovision on counter = 0x%02x\n", 208 printk("tvp5150: Macrovision on counter = 0x%02x\n",
193 tvp5150_read(c, TVP5150_MACROVISION_ON_CTR)); 209 tvp5150_read(c, TVP5150_MACROVISION_ON_CTR));
194 printk("tvp5150: Macrovision off counter = 0x%02x\n", 210 printk("tvp5150: Macrovision off counter = 0x%02x\n",
195 tvp5150_read(c, TVP5150_MACROVISION_OFF_CTR)); 211 tvp5150_read(c, TVP5150_MACROVISION_OFF_CTR));
196 printk("tvp5150: revision select (TVP5150AM1 only) = 0x%02x\n", 212 printk("tvp5150: ITU-R BT.656.%d timing(TVP5150AM1 only)\n",
197 tvp5150_read(c, TVP5150_REV_SELECT)); 213 (tvp5150_read(c, TVP5150_REV_SELECT)&1)?3:4);
198 printk("tvp5150: MSB of device ID = 0x%02x\n", 214 printk("tvp5150: Device ID = %02x%02x\n",
199 tvp5150_read(c, TVP5150_MSB_DEV_ID)); 215 tvp5150_read(c, TVP5150_MSB_DEV_ID),
200 printk("tvp5150: LSB of device ID = 0x%02x\n", 216 tvp5150_read(c, TVP5150_LSB_DEV_ID));
201 tvp5150_read(c, TVP5150_LSB_DEV_ID)); 217 printk("tvp5150: ROM version = (hex) %02x.%02x\n",
202 printk("tvp5150: ROM major version = 0x%02x\n", 218 tvp5150_read(c, TVP5150_ROM_MAJOR_VER),
203 tvp5150_read(c, TVP5150_ROM_MAJOR_VER)); 219 tvp5150_read(c, TVP5150_ROM_MINOR_VER));
204 printk("tvp5150: ROM minor version = 0x%02x\n", 220 printk("tvp5150: Vertical line count = 0x%02x%02x\n",
205 tvp5150_read(c, TVP5150_ROM_MINOR_VER)); 221 tvp5150_read(c, TVP5150_VERT_LN_COUNT_MSB),
206 printk("tvp5150: Vertical line count MSB = 0x%02x\n", 222 tvp5150_read(c, TVP5150_VERT_LN_COUNT_LSB));
207 tvp5150_read(c, TVP5150_VERT_LN_COUNT_MSB));
208 printk("tvp5150: Vertical line count LSB = 0x%02x\n",
209 tvp5150_read(c, TVP5150_VERT_LN_COUNT_LSB));
210 printk("tvp5150: Interrupt status register B = 0x%02x\n", 223 printk("tvp5150: Interrupt status register B = 0x%02x\n",
211 tvp5150_read(c, TVP5150_INT_STATUS_REG_B)); 224 tvp5150_read(c, TVP5150_INT_STATUS_REG_B));
212 printk("tvp5150: Interrupt active register B = 0x%02x\n", 225 printk("tvp5150: Interrupt active register B = 0x%02x\n",
213 tvp5150_read(c, TVP5150_INT_ACTIVE_REG_B)); 226 tvp5150_read(c, TVP5150_INT_ACTIVE_REG_B));
214 printk("tvp5150: Status register #1 = 0x%02x\n", 227 printk("tvp5150: Status regs #1 to #5 = %02x %02x %02x %02x %02x\n",
215 tvp5150_read(c, TVP5150_STATUS_REG_1)); 228 tvp5150_read(c, TVP5150_STATUS_REG_1),
216 printk("tvp5150: Status register #2 = 0x%02x\n", 229 tvp5150_read(c, TVP5150_STATUS_REG_2),
217 tvp5150_read(c, TVP5150_STATUS_REG_2)); 230 tvp5150_read(c, TVP5150_STATUS_REG_3),
218 printk("tvp5150: Status register #3 = 0x%02x\n", 231 tvp5150_read(c, TVP5150_STATUS_REG_4),
219 tvp5150_read(c, TVP5150_STATUS_REG_3)); 232 tvp5150_read(c, TVP5150_STATUS_REG_5));
220 printk("tvp5150: Status register #4 = 0x%02x\n", 233
221 tvp5150_read(c, TVP5150_STATUS_REG_4)); 234 dump_reg_range(c,"Teletext filter 1", TVP5150_TELETEXT_FIL1_INI,
222 printk("tvp5150: Status register #5 = 0x%02x\n", 235 TVP5150_TELETEXT_FIL1_END,8);
223 tvp5150_read(c, TVP5150_STATUS_REG_5)); 236 dump_reg_range(c,"Teletext filter 2", TVP5150_TELETEXT_FIL2_INI,
224 printk("tvp5150: Closed caption data registers = 0x%02x\n", 237 TVP5150_TELETEXT_FIL2_END,8);
225 tvp5150_read(c, TVP5150_CC_DATA_REG1)); 238
226 printk("tvp5150: Closed caption data registers = 0x%02x\n",
227 tvp5150_read(c, TVP5150_CC_DATA_REG2));
228 printk("tvp5150: Closed caption data registers = 0x%02x\n",
229 tvp5150_read(c, TVP5150_CC_DATA_REG3));
230 printk("tvp5150: Closed caption data registers = 0x%02x\n",
231 tvp5150_read(c, TVP5150_CC_DATA_REG4));
232 printk("tvp5150: WSS data registers = 0x%02x\n",
233 tvp5150_read(c, TVP5150_WSS_DATA_REG1));
234 printk("tvp5150: WSS data registers = 0x%02x\n",
235 tvp5150_read(c, TVP5150_WSS_DATA_REG2));
236 printk("tvp5150: WSS data registers = 0x%02x\n",
237 tvp5150_read(c, TVP5150_WSS_DATA_REG3));
238 printk("tvp5150: WSS data registers = 0x%02x\n",
239 tvp5150_read(c, TVP5150_WSS_DATA_REG4));
240 printk("tvp5150: WSS data registers = 0x%02x\n",
241 tvp5150_read(c, TVP5150_WSS_DATA_REG5));
242 printk("tvp5150: WSS data registers = 0x%02x\n",
243 tvp5150_read(c, TVP5150_WSS_DATA_REG6));
244 printk("tvp5150: VPS data registers = 0x%02x\n",
245 tvp5150_read(c, TVP5150_VPS_DATA_REG1));
246 printk("tvp5150: VPS data registers = 0x%02x\n",
247 tvp5150_read(c, TVP5150_VPS_DATA_REG2));
248 printk("tvp5150: VPS data registers = 0x%02x\n",
249 tvp5150_read(c, TVP5150_VPS_DATA_REG3));
250 printk("tvp5150: VPS data registers = 0x%02x\n",
251 tvp5150_read(c, TVP5150_VPS_DATA_REG4));
252 printk("tvp5150: VPS data registers = 0x%02x\n",
253 tvp5150_read(c, TVP5150_VPS_DATA_REG5));
254 printk("tvp5150: VPS data registers = 0x%02x\n",
255 tvp5150_read(c, TVP5150_VPS_DATA_REG6));
256 printk("tvp5150: VPS data registers = 0x%02x\n",
257 tvp5150_read(c, TVP5150_VPS_DATA_REG7));
258 printk("tvp5150: VPS data registers = 0x%02x\n",
259 tvp5150_read(c, TVP5150_VPS_DATA_REG8));
260 printk("tvp5150: VPS data registers = 0x%02x\n",
261 tvp5150_read(c, TVP5150_VPS_DATA_REG9));
262 printk("tvp5150: VPS data registers = 0x%02x\n",
263 tvp5150_read(c, TVP5150_VPS_DATA_REG10));
264 printk("tvp5150: VPS data registers = 0x%02x\n",
265 tvp5150_read(c, TVP5150_VPS_DATA_REG11));
266 printk("tvp5150: VPS data registers = 0x%02x\n",
267 tvp5150_read(c, TVP5150_VPS_DATA_REG12));
268 printk("tvp5150: VPS data registers = 0x%02x\n",
269 tvp5150_read(c, TVP5150_VPS_DATA_REG13));
270 printk("tvp5150: VITC data registers = 0x%02x\n",
271 tvp5150_read(c, TVP5150_VITC_DATA_REG1));
272 printk("tvp5150: VITC data registers = 0x%02x\n",
273 tvp5150_read(c, TVP5150_VITC_DATA_REG2));
274 printk("tvp5150: VITC data registers = 0x%02x\n",
275 tvp5150_read(c, TVP5150_VITC_DATA_REG3));
276 printk("tvp5150: VITC data registers = 0x%02x\n",
277 tvp5150_read(c, TVP5150_VITC_DATA_REG4));
278 printk("tvp5150: VITC data registers = 0x%02x\n",
279 tvp5150_read(c, TVP5150_VITC_DATA_REG5));
280 printk("tvp5150: VITC data registers = 0x%02x\n",
281 tvp5150_read(c, TVP5150_VITC_DATA_REG6));
282 printk("tvp5150: VITC data registers = 0x%02x\n",
283 tvp5150_read(c, TVP5150_VITC_DATA_REG7));
284 printk("tvp5150: VITC data registers = 0x%02x\n",
285 tvp5150_read(c, TVP5150_VITC_DATA_REG8));
286 printk("tvp5150: VITC data registers = 0x%02x\n",
287 tvp5150_read(c, TVP5150_VITC_DATA_REG9));
288 printk("tvp5150: VBI FIFO read data = 0x%02x\n",
289 tvp5150_read(c, TVP5150_VBI_FIFO_READ_DATA));
290 printk("tvp5150: Teletext filter 1 = 0x%02x\n",
291 tvp5150_read(c, TVP5150_TELETEXT_FIL_1_1));
292 printk("tvp5150: Teletext filter 1 = 0x%02x\n",
293 tvp5150_read(c, TVP5150_TELETEXT_FIL_1_2));
294 printk("tvp5150: Teletext filter 1 = 0x%02x\n",
295 tvp5150_read(c, TVP5150_TELETEXT_FIL_1_3));
296 printk("tvp5150: Teletext filter 1 = 0x%02x\n",
297 tvp5150_read(c, TVP5150_TELETEXT_FIL_1_4));
298 printk("tvp5150: Teletext filter 1 = 0x%02x\n",
299 tvp5150_read(c, TVP5150_TELETEXT_FIL_1_5));
300 printk("tvp5150: Teletext filter 2 = 0x%02x\n",
301 tvp5150_read(c, TVP5150_TELETEXT_FIL_2_1));
302 printk("tvp5150: Teletext filter 2 = 0x%02x\n",
303 tvp5150_read(c, TVP5150_TELETEXT_FIL_2_2));
304 printk("tvp5150: Teletext filter 2 = 0x%02x\n",
305 tvp5150_read(c, TVP5150_TELETEXT_FIL_2_3));
306 printk("tvp5150: Teletext filter 2 = 0x%02x\n",
307 tvp5150_read(c, TVP5150_TELETEXT_FIL_2_4));
308 printk("tvp5150: Teletext filter 2 = 0x%02x\n",
309 tvp5150_read(c, TVP5150_TELETEXT_FIL_2_5));
310 printk("tvp5150: Teletext filter enable = 0x%02x\n", 239 printk("tvp5150: Teletext filter enable = 0x%02x\n",
311 tvp5150_read(c, TVP5150_TELETEXT_FIL_ENA)); 240 tvp5150_read(c, TVP5150_TELETEXT_FIL_ENA));
312 printk("tvp5150: Interrupt status register A = 0x%02x\n", 241 printk("tvp5150: Interrupt status register A = 0x%02x\n",
313 tvp5150_read(c, TVP5150_INT_STATUS_REG_A)); 242 tvp5150_read(c, TVP5150_INT_STATUS_REG_A));
314 printk("tvp5150: Interrupt enable register A = 0x%02x\n", 243 printk("tvp5150: Interrupt enable register A = 0x%02x\n",
315 tvp5150_read(c, TVP5150_INT_ENABLE_REG_A)); 244 tvp5150_read(c, TVP5150_INT_ENABLE_REG_A));
316 printk("tvp5150: Interrupt configuration = 0x%02x\n", 245 printk("tvp5150: Interrupt configuration = 0x%02x\n",
317 tvp5150_read(c, TVP5150_INT_CONF)); 246 tvp5150_read(c, TVP5150_INT_CONF));
318 printk("tvp5150: VDP configuration RAM data = 0x%02x\n",
319 tvp5150_read(c, TVP5150_VDP_CONF_RAM_DATA));
320 printk("tvp5150: Configuration RAM address low byte = 0x%02x\n",
321 tvp5150_read(c, TVP5150_CONF_RAM_ADDR_LOW));
322 printk("tvp5150: Configuration RAM address high byte = 0x%02x\n",
323 tvp5150_read(c, TVP5150_CONF_RAM_ADDR_HIGH));
324 printk("tvp5150: VDP status register = 0x%02x\n", 247 printk("tvp5150: VDP status register = 0x%02x\n",
325 tvp5150_read(c, TVP5150_VDP_STATUS_REG)); 248 tvp5150_read(c, TVP5150_VDP_STATUS_REG));
326 printk("tvp5150: FIFO word count = 0x%02x\n", 249 printk("tvp5150: FIFO word count = 0x%02x\n",
327 tvp5150_read(c, TVP5150_FIFO_WORD_COUNT)); 250 tvp5150_read(c, TVP5150_FIFO_WORD_COUNT));
328 printk("tvp5150: FIFO interrupt threshold = 0x%02x\n", 251 printk("tvp5150: FIFO interrupt threshold = 0x%02x\n",
329 tvp5150_read(c, TVP5150_FIFO_INT_THRESHOLD)); 252 tvp5150_read(c, TVP5150_FIFO_INT_THRESHOLD));
330 printk("tvp5150: FIFO reset = 0x%02x\n", 253 printk("tvp5150: FIFO reset = 0x%02x\n",
331 tvp5150_read(c, TVP5150_FIFO_RESET)); 254 tvp5150_read(c, TVP5150_FIFO_RESET));
332 printk("tvp5150: Line number interrupt = 0x%02x\n", 255 printk("tvp5150: Line number interrupt = 0x%02x\n",
333 tvp5150_read(c, TVP5150_LINE_NUMBER_INT)); 256 tvp5150_read(c, TVP5150_LINE_NUMBER_INT));
334 printk("tvp5150: Pixel alignment register low byte = 0x%02x\n", 257 printk("tvp5150: Pixel alignment register = 0x%02x%02x\n",
335 tvp5150_read(c, TVP5150_PIX_ALIGN_REG_LOW)); 258 tvp5150_read(c, TVP5150_PIX_ALIGN_REG_HIGH),
336 printk("tvp5150: Pixel alignment register high byte = 0x%02x\n", 259 tvp5150_read(c, TVP5150_PIX_ALIGN_REG_LOW));
337 tvp5150_read(c, TVP5150_PIX_ALIGN_REG_HIGH));
338 printk("tvp5150: FIFO output control = 0x%02x\n", 260 printk("tvp5150: FIFO output control = 0x%02x\n",
339 tvp5150_read(c, TVP5150_FIFO_OUT_CTRL)); 261 tvp5150_read(c, TVP5150_FIFO_OUT_CTRL));
340 printk("tvp5150: Full field enable 1 = 0x%02x\n", 262 printk("tvp5150: Full field enable = 0x%02x\n",
341 tvp5150_read(c, TVP5150_FULL_FIELD_ENA_1)); 263 tvp5150_read(c, TVP5150_FULL_FIELD_ENA));
342 printk("tvp5150: Full field enable 2 = 0x%02x\n",
343 tvp5150_read(c, TVP5150_FULL_FIELD_ENA_2));
344 printk("tvp5150: Line mode registers = 0x%02x\n",
345 tvp5150_read(c, TVP5150_LINE_MODE_REG_1));
346 printk("tvp5150: Line mode registers = 0x%02x\n",
347 tvp5150_read(c, TVP5150_LINE_MODE_REG_2));
348 printk("tvp5150: Line mode registers = 0x%02x\n",
349 tvp5150_read(c, TVP5150_LINE_MODE_REG_3));
350 printk("tvp5150: Line mode registers = 0x%02x\n",
351 tvp5150_read(c, TVP5150_LINE_MODE_REG_4));
352 printk("tvp5150: Line mode registers = 0x%02x\n",
353 tvp5150_read(c, TVP5150_LINE_MODE_REG_5));
354 printk("tvp5150: Line mode registers = 0x%02x\n",
355 tvp5150_read(c, TVP5150_LINE_MODE_REG_6));
356 printk("tvp5150: Line mode registers = 0x%02x\n",
357 tvp5150_read(c, TVP5150_LINE_MODE_REG_7));
358 printk("tvp5150: Line mode registers = 0x%02x\n",
359 tvp5150_read(c, TVP5150_LINE_MODE_REG_8));
360 printk("tvp5150: Line mode registers = 0x%02x\n",
361 tvp5150_read(c, TVP5150_LINE_MODE_REG_9));
362 printk("tvp5150: Line mode registers = 0x%02x\n",
363 tvp5150_read(c, TVP5150_LINE_MODE_REG_10));
364 printk("tvp5150: Line mode registers = 0x%02x\n",
365 tvp5150_read(c, TVP5150_LINE_MODE_REG_11));
366 printk("tvp5150: Line mode registers = 0x%02x\n",
367 tvp5150_read(c, TVP5150_LINE_MODE_REG_12));
368 printk("tvp5150: Line mode registers = 0x%02x\n",
369 tvp5150_read(c, TVP5150_LINE_MODE_REG_13));
370 printk("tvp5150: Line mode registers = 0x%02x\n",
371 tvp5150_read(c, TVP5150_LINE_MODE_REG_14));
372 printk("tvp5150: Line mode registers = 0x%02x\n",
373 tvp5150_read(c, TVP5150_LINE_MODE_REG_15));
374 printk("tvp5150: Line mode registers = 0x%02x\n",
375 tvp5150_read(c, TVP5150_LINE_MODE_REG_16));
376 printk("tvp5150: Line mode registers = 0x%02x\n",
377 tvp5150_read(c, TVP5150_LINE_MODE_REG_17));
378 printk("tvp5150: Line mode registers = 0x%02x\n",
379 tvp5150_read(c, TVP5150_LINE_MODE_REG_18));
380 printk("tvp5150: Line mode registers = 0x%02x\n",
381 tvp5150_read(c, TVP5150_LINE_MODE_REG_19));
382 printk("tvp5150: Line mode registers = 0x%02x\n",
383 tvp5150_read(c, TVP5150_LINE_MODE_REG_20));
384 printk("tvp5150: Line mode registers = 0x%02x\n",
385 tvp5150_read(c, TVP5150_LINE_MODE_REG_21));
386 printk("tvp5150: Line mode registers = 0x%02x\n",
387 tvp5150_read(c, TVP5150_LINE_MODE_REG_22));
388 printk("tvp5150: Line mode registers = 0x%02x\n",
389 tvp5150_read(c, TVP5150_LINE_MODE_REG_23));
390 printk("tvp5150: Line mode registers = 0x%02x\n",
391 tvp5150_read(c, TVP5150_LINE_MODE_REG_24));
392 printk("tvp5150: Line mode registers = 0x%02x\n",
393 tvp5150_read(c, TVP5150_LINE_MODE_REG_25));
394 printk("tvp5150: Line mode registers = 0x%02x\n",
395 tvp5150_read(c, TVP5150_LINE_MODE_REG_27));
396 printk("tvp5150: Line mode registers = 0x%02x\n",
397 tvp5150_read(c, TVP5150_LINE_MODE_REG_28));
398 printk("tvp5150: Line mode registers = 0x%02x\n",
399 tvp5150_read(c, TVP5150_LINE_MODE_REG_29));
400 printk("tvp5150: Line mode registers = 0x%02x\n",
401 tvp5150_read(c, TVP5150_LINE_MODE_REG_30));
402 printk("tvp5150: Line mode registers = 0x%02x\n",
403 tvp5150_read(c, TVP5150_LINE_MODE_REG_31));
404 printk("tvp5150: Line mode registers = 0x%02x\n",
405 tvp5150_read(c, TVP5150_LINE_MODE_REG_32));
406 printk("tvp5150: Line mode registers = 0x%02x\n",
407 tvp5150_read(c, TVP5150_LINE_MODE_REG_33));
408 printk("tvp5150: Line mode registers = 0x%02x\n",
409 tvp5150_read(c, TVP5150_LINE_MODE_REG_34));
410 printk("tvp5150: Line mode registers = 0x%02x\n",
411 tvp5150_read(c, TVP5150_LINE_MODE_REG_35));
412 printk("tvp5150: Line mode registers = 0x%02x\n",
413 tvp5150_read(c, TVP5150_LINE_MODE_REG_36));
414 printk("tvp5150: Line mode registers = 0x%02x\n",
415 tvp5150_read(c, TVP5150_LINE_MODE_REG_37));
416 printk("tvp5150: Line mode registers = 0x%02x\n",
417 tvp5150_read(c, TVP5150_LINE_MODE_REG_38));
418 printk("tvp5150: Line mode registers = 0x%02x\n",
419 tvp5150_read(c, TVP5150_LINE_MODE_REG_39));
420 printk("tvp5150: Line mode registers = 0x%02x\n",
421 tvp5150_read(c, TVP5150_LINE_MODE_REG_40));
422 printk("tvp5150: Line mode registers = 0x%02x\n",
423 tvp5150_read(c, TVP5150_LINE_MODE_REG_41));
424 printk("tvp5150: Line mode registers = 0x%02x\n",
425 tvp5150_read(c, TVP5150_LINE_MODE_REG_42));
426 printk("tvp5150: Line mode registers = 0x%02x\n",
427 tvp5150_read(c, TVP5150_LINE_MODE_REG_43));
428 printk("tvp5150: Line mode registers = 0x%02x\n",
429 tvp5150_read(c, TVP5150_LINE_MODE_REG_44));
430 printk("tvp5150: Full field mode register = 0x%02x\n", 264 printk("tvp5150: Full field mode register = 0x%02x\n",
431 tvp5150_read(c, TVP5150_FULL_FIELD_MODE_REG)); 265 tvp5150_read(c, TVP5150_FULL_FIELD_MODE_REG));
266
267 dump_reg_range(c,"CC data", TVP5150_CC_DATA_INI,
268 TVP5150_CC_DATA_END,8);
269
270 dump_reg_range(c,"WSS data", TVP5150_WSS_DATA_INI,
271 TVP5150_WSS_DATA_END,8);
272
273 dump_reg_range(c,"VPS data", TVP5150_VPS_DATA_INI,
274 TVP5150_VPS_DATA_END,8);
275
276 dump_reg_range(c,"VITC data", TVP5150_VITC_DATA_INI,
277 TVP5150_VITC_DATA_END,10);
278
279 dump_reg_range(c,"Line mode", TVP5150_LINE_MODE_INI,
280 TVP5150_LINE_MODE_END,8);
432} 281}
433 282
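/* dump_reg_range() is used above; its definition is added earlier in
 * tvp5150.c, outside the hunks quoted here. A minimal sketch of such a
 * helper, assuming only the signature visible at the call sites
 * (client, label, first register, last register, values per printed line):
 *
 *	static void dump_reg_range(struct i2c_client *c, char *s,
 *				   u8 init, const u8 end, int max_line)
 *	{
 *		int i;
 *
 *		for (i = init; i <= end; i++) {
 *			if ((i - init) % max_line == 0)
 *				printk("tvp5150: %s reg 0x%02x = ", s, i);
 *			printk("%02x ", tvp5150_read(c, i));
 *			if ((i - init) % max_line == max_line - 1 || i == end)
 *				printk("\n");
 *		}
 *	}
 */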
434/**************************************************************************** 283/****************************************************************************
@@ -593,10 +442,10 @@ static const struct i2c_reg_value tvp5150_init_default[] = {
593 TVP5150_FIFO_OUT_CTRL,0x01 442 TVP5150_FIFO_OUT_CTRL,0x01
594 }, 443 },
595 { /* 0xcf */ 444 { /* 0xcf */
596 TVP5150_FULL_FIELD_ENA_1,0x00 445 TVP5150_FULL_FIELD_ENA,0x00
597 }, 446 },
598 { /* 0xd0 */ 447 { /* 0xd0 */
599 TVP5150_FULL_FIELD_ENA_2,0x00 448 TVP5150_LINE_MODE_INI,0x00
600 }, 449 },
601 { /* 0xfc */ 450 { /* 0xfc */
602 TVP5150_FULL_FIELD_MODE_REG,0x7f 451 TVP5150_FULL_FIELD_MODE_REG,0x7f
@@ -629,54 +478,101 @@ static const struct i2c_reg_value tvp5150_init_enable[] = {
629 } 478 }
630}; 479};
631 480
481struct tvp5150_vbi_type {
482 unsigned int vbi_type;
483 unsigned int ini_line;
484 unsigned int end_line;
485 unsigned int by_field :1;
486};
487
632struct i2c_vbi_ram_value { 488struct i2c_vbi_ram_value {
633 u16 reg; 489 u16 reg;
634 unsigned char values[26]; 490 struct tvp5150_vbi_type type;
491 unsigned char values[16];
635}; 492};
636 493
494/* This struct holds the values for each supported VBI standard.
495 * The tvp5150_vbi_type entries should follow the same order as
496 * vbi_ram_default: value 0 means RAM position 0x10, value 1 means
497 * RAM position 0x30, and so on. There are 16 possible locations,
498 * from 0 to 15.
499 */
500
637static struct i2c_vbi_ram_value vbi_ram_default[] = 501static struct i2c_vbi_ram_value vbi_ram_default[] =
638{ 502{
639 {0x010, /* WST SECAM 6 */ 503 {0x010, /* Teletext, SECAM, WST System A */
640 { 0xaa, 0xaa, 0xff, 0xff , 0xe7, 0x2e, 0x20, 0x26, 0xe6, 0xb4, 0x0e, 0x0, 0x0, 0x0, 0x10, 0x0 } 504 {V4L2_SLICED_TELETEXT_SECAM,6,23,1},
505 { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x26,
506 0xe6, 0xb4, 0x0e, 0x00, 0x00, 0x00, 0x10, 0x00 }
641 }, 507 },
642 {0x030, /* WST PAL B 6 */ 508 {0x030, /* Teletext, PAL, WST System B */
643 { 0xaa, 0xaa, 0xff, 0xff , 0x27, 0x2e, 0x20, 0x2b, 0xa6, 0x72, 0x10, 0x0, 0x0, 0x0, 0x10, 0x0 } 509 {V4L2_SLICED_TELETEXT_PAL_B,6,22,1},
510 { 0xaa, 0xaa, 0xff, 0xff, 0x27, 0x2e, 0x20, 0x2b,
511 0xa6, 0x72, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00 }
644 }, 512 },
645 {0x050, /* WST PAL C 6 */ 513 {0x050, /* Teletext, PAL, WST System C */
646 { 0xaa, 0xaa, 0xff, 0xff , 0xe7, 0x2e, 0x20, 0x22, 0xa6, 0x98, 0x0d, 0x0, 0x0, 0x0, 0x10, 0x0 } 514 {V4L2_SLICED_TELETEXT_PAL_C,6,22,1},
515 { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x22,
516 0xa6, 0x98, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 }
647 }, 517 },
648 {0x070, /* WST NTSC 6 */ 518 {0x070, /* Teletext, NTSC, WST System B */
649 { 0xaa, 0xaa, 0xff, 0xff , 0x27, 0x2e, 0x20, 0x23, 0x69, 0x93, 0x0d, 0x0, 0x0, 0x0, 0x10, 0x0 } 519 {V4L2_SLICED_TELETEXT_NTSC_B,10,21,1},
520 { 0xaa, 0xaa, 0xff, 0xff, 0x27, 0x2e, 0x20, 0x23,
521 0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 }
650 }, 522 },
651 {0x090, /* NABTS, NTSC 6 */ 523 {0x090, /* Teletext, NTSC, NABTS System C */
652 { 0xaa, 0xaa, 0xff, 0xff , 0xe7, 0x2e, 0x20, 0x22, 0x69, 0x93, 0x0d, 0x0, 0x0, 0x0, 0x15, 0x0 } 524 {V4L2_SLICED_TELETEXT_NTSC_C,10,21,1},
525 { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x22,
526 0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x15, 0x00 }
653 }, 527 },
654 {0x0b0, /* NABTS, NTSC-J 6 */ 528 {0x0b0, /* Teletext, NTSC-J, NABTS System D */
655 { 0xaa, 0xaa, 0xff, 0xff , 0xa7, 0x2e, 0x20, 0x23, 0x69, 0x93, 0x0d, 0x0, 0x0, 0x0, 0x10, 0x0 } 529 {V4L2_SLICED_TELETEXT_NTSC_D,10,21,1},
530 { 0xaa, 0xaa, 0xff, 0xff, 0xa7, 0x2e, 0x20, 0x23,
531 0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 }
656 }, 532 },
657 {0x0d0, /* CC, PAL/SECAM 6 */ 533 {0x0d0, /* Closed Caption, PAL/SECAM */
658 { 0xaa, 0x2a, 0xff, 0x3f , 0x04, 0x51, 0x6e, 0x02, 0xa6, 0x7b, 0x09, 0x0, 0x0, 0x0, 0x27, 0x0 } 534 {V4L2_SLICED_CAPTION_625,22,22,1},
535 { 0xaa, 0x2a, 0xff, 0x3f, 0x04, 0x51, 0x6e, 0x02,
536 0xa6, 0x7b, 0x09, 0x00, 0x00, 0x00, 0x27, 0x00 }
659 }, 537 },
660 {0x0f0, /* CC, NTSC 6 */ 538 {0x0f0, /* Closed Caption, NTSC */
661 { 0xaa, 0x2a, 0xff, 0x3f , 0x04, 0x51, 0x6e, 0x02, 0x69, 0x8c, 0x09, 0x0, 0x0, 0x0, 0x27, 0x0 } 539 {V4L2_SLICED_CAPTION_525,21,21,1},
540 { 0xaa, 0x2a, 0xff, 0x3f, 0x04, 0x51, 0x6e, 0x02,
541 0x69, 0x8c, 0x09, 0x00, 0x00, 0x00, 0x27, 0x00 }
662 }, 542 },
663 {0x110, /* WSS, PAL/SECAM 6 */ 543 {0x110, /* Wide Screen Signal, PAL/SECAM */
664 { 0x5b, 0x55, 0xc5, 0xff , 0x0, 0x71, 0x6e, 0x42, 0xa6, 0xcd, 0x0f, 0x0, 0x0, 0x0, 0x3a, 0x0 } 544 {V4L2_SLICED_WSS_625,23,23,1},
545 { 0x5b, 0x55, 0xc5, 0xff, 0x00, 0x71, 0x6e, 0x42,
546 0xa6, 0xcd, 0x0f, 0x00, 0x00, 0x00, 0x3a, 0x00 }
665 }, 547 },
666 {0x130, /* WSS, NTSC C */ 548 {0x130, /* Wide Screen Signal, NTSC C */
667 { 0x38, 0x00, 0x3f, 0x00 , 0x0, 0x71, 0x6e, 0x43, 0x69, 0x7c, 0x08, 0x0, 0x0, 0x0, 0x39, 0x0 } 549 {V4L2_SLICED_WSS_525,20,20,1},
550 { 0x38, 0x00, 0x3f, 0x00, 0x00, 0x71, 0x6e, 0x43,
551 0x69, 0x7c, 0x08, 0x00, 0x00, 0x00, 0x39, 0x00 }
668 }, 552 },
669 {0x150, /* VITC, PAL/SECAM 6 */ 553 {0x150, /* Vertical Interval Timecode (VITC), PAL/SECAM */
670 { 0x0, 0x0, 0x0, 0x0 , 0x0, 0x8f, 0x6d, 0x49, 0xa6, 0x85, 0x08, 0x0, 0x0, 0x0, 0x4c, 0x0 } 554 {V4l2_SLICED_VITC_625,6,22,0},
555 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x6d, 0x49,
556 0xa6, 0x85, 0x08, 0x00, 0x00, 0x00, 0x4c, 0x00 }
671 }, 557 },
672 {0x170, /* VITC, NTSC 6 */ 558 {0x170, /* Vertical Interval Timecode (VITC), NTSC */
673 { 0x0, 0x0, 0x0, 0x0 , 0x0, 0x8f, 0x6d, 0x49, 0x69, 0x94, 0x08, 0x0, 0x0, 0x0, 0x4c, 0x0 } 559 {V4l2_SLICED_VITC_525,10,20,0},
560 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x6d, 0x49,
561 0x69, 0x94, 0x08, 0x00, 0x00, 0x00, 0x4c, 0x00 }
674 }, 562 },
563 {0x190, /* Video Program System (VPS), PAL */
564 {V4L2_SLICED_VPS,16,16,0},
565 { 0xaa, 0xaa, 0xff, 0xff, 0xba, 0xce, 0x2b, 0x0d,
566 0xa6, 0xda, 0x0b, 0x00, 0x00, 0x00, 0x60, 0x00 }
567 },
568 /* 0x1d0 User programmable */
569
570 /* End of struct */
675 { (u16)-1 } 571 { (u16)-1 }
676}; 572};
677 573
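/* Each entry's 'reg' above is the VDP configuration RAM address at which its
 * 16 'values' bytes are loaded, while the entry's position in the array
 * (0..15) is the nibble later written into a line mode register to select
 * that service. A hypothetical helper (not part of this patch) mapping a RAM
 * address back to its table position:
 *
 *	static inline int vbi_ram_pos(u16 reg)
 *	{
 *		return (reg - 0x010) / 0x020;	// 0x010 -> 0, 0x030 -> 1, ... 0x190 -> 12
 *	}
 */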
678static int tvp5150_write_inittab(struct i2c_client *c, 574static int tvp5150_write_inittab(struct i2c_client *c,
679 const struct i2c_reg_value *regs) 575 const struct i2c_reg_value *regs)
680{ 576{
681 while (regs->reg != 0xff) { 577 while (regs->reg != 0xff) {
682 tvp5150_write(c, regs->reg, regs->value); 578 tvp5150_write(c, regs->reg, regs->value);
@@ -686,15 +582,15 @@ static int tvp5150_write_inittab(struct i2c_client *c,
686} 582}
687 583
688static int tvp5150_vdp_init(struct i2c_client *c, 584static int tvp5150_vdp_init(struct i2c_client *c,
689 const struct i2c_vbi_ram_value *regs) 585 const struct i2c_vbi_ram_value *regs)
690{ 586{
691 unsigned int i; 587 unsigned int i;
692 588
693 /* Disable Full Field */ 589 /* Disable Full Field */
694 tvp5150_write(c, TVP5150_FULL_FIELD_ENA_1, 0); 590 tvp5150_write(c, TVP5150_FULL_FIELD_ENA, 0);
695 591
696 /* Before programming, Line mode should be at 0xff */ 592 /* Before programming, Line mode should be at 0xff */
697 for (i=TVP5150_FULL_FIELD_ENA_2; i<=TVP5150_LINE_MODE_REG_44; i++) 593 for (i=TVP5150_LINE_MODE_INI; i<=TVP5150_LINE_MODE_END; i++)
698 tvp5150_write(c, i, 0xff); 594 tvp5150_write(c, i, 0xff);
699 595
700 /* Load Ram Table */ 596 /* Load Ram Table */
@@ -710,6 +606,117 @@ static int tvp5150_vdp_init(struct i2c_client *c,
710 return 0; 606 return 0;
711} 607}
712 608
609/* Fills VBI capabilities based on i2c_vbi_ram_value struct */
610static void tvp5150_vbi_get_cap(const struct i2c_vbi_ram_value *regs,
611 struct v4l2_sliced_vbi_cap *cap)
612{
613 int line;
614
615 memset(cap, 0, sizeof *cap);
616
617 while (regs->reg != (u16)-1 ) {
618 for (line=regs->type.ini_line;line<=regs->type.end_line;line++) {
619 cap->service_lines[0][line] |= regs->type.vbi_type;
620 }
621 cap->service_set |= regs->type.vbi_type;
622
623 regs++;
624 }
625}
626
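/* With vbi_ram_default above, VIDIOC_G_SLICED_VBI_CAP therefore reports, for
 * example, V4L2_SLICED_VPS only on line 16 and V4L2_SLICED_WSS_625 only on
 * line 23, while the teletext services cover their full 6-23 (SECAM) or
 * 6-22 (PAL) line ranges.
 */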
627/* Set vbi processing
628 * type - one of tvp5150_vbi_types
629 * line - line to gather data
630 * fields: bit 0: field 1, bit 1: field 2
631 * flags (default=0xf0) is a bitmask, where a set bit means:
632 * bit 7: enable filtering null bytes on CC
633 * bit 6: send data also to FIFO
634 * bit 5: don't allow data with errors on FIFO
635 * bit 4: enable ECC when possible
636 * pix_align = pix alignment:
637 * LSB = field1
638 * MSB = field2
639 */
640static int tvp5150_set_vbi(struct i2c_client *c,
641 const struct i2c_vbi_ram_value *regs,
642 unsigned int type,u8 flags, int line,
643 const int fields)
644{
645 struct tvp5150 *decoder = i2c_get_clientdata(c);
646 v4l2_std_id std=decoder->norm;
647 u8 reg;
648 int pos=0;
649
650 if (std == V4L2_STD_ALL) {
651 tvp5150_err("VBI can't be configured without knowing number of lines\n");
652 return 0;
653 } else if (std & V4L2_STD_625_50) {
654 /* 625 line standards don't follow the NTSC line number convention */
655 line += 3;
656 }
657
658 if (line<6||line>27)
659 return 0;
660
661 while (regs->reg != (u16)-1 ) {
662 if ((type & regs->type.vbi_type) &&
663 (line>=regs->type.ini_line) &&
664 (line<=regs->type.end_line)) {
665 type=regs->type.vbi_type;
666 break;
667 }
668
669 regs++;
670 pos++;
671 }
672 if (regs->reg == (u16)-1)
673 return 0;
674
675 type=pos | (flags & 0xf0);
676 reg=((line-6)<<1)+TVP5150_LINE_MODE_INI;
677
678 if (fields&1) {
679 tvp5150_write(c, reg, type);
680 }
681
682 if (fields&2) {
683 tvp5150_write(c, reg+1, type);
684 }
685
686 return type;
687}
688
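/* Worked example, assuming the defaults above: requesting NTSC closed
 * caption (V4L2_SLICED_CAPTION_525) on line 21 with flags=0xf0 and fields=3
 * matches table entry 7 (RAM address 0x0f0), so
 *
 *	reg  = ((21 - 6) << 1) + TVP5150_LINE_MODE_INI = 0x1e + 0xd0 = 0xee
 *	type = 0xf0 | 7 = 0xf7
 *
 * and 0xf7 is written to register 0xee (field 1) and 0xef (field 2), e.g.:
 *
 *	tvp5150_set_vbi(c, vbi_ram_default, V4L2_SLICED_CAPTION_525, 0xf0, 21, 3);
 */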
689static int tvp5150_get_vbi(struct i2c_client *c,
690 const struct i2c_vbi_ram_value *regs, int line)
691{
692 struct tvp5150 *decoder = i2c_get_clientdata(c);
693 v4l2_std_id std=decoder->norm;
694 u8 reg;
695 int pos, type=0;
696
697 if (std == V4L2_STD_ALL) {
698 tvp5150_err("VBI can't be configured without knowing number of lines\n");
699 return 0;
700 } else if (std & V4L2_STD_625_50) {
701 /* 625 line standards don't follow the NTSC line number convention */
702 line += 3;
703 }
704
705 if (line<6||line>27)
706 return 0;
707
708 reg=((line-6)<<1)+TVP5150_LINE_MODE_INI;
709
710 pos=tvp5150_read(c, reg)&0x0f;
711 if (pos<0x0f)
712 type=regs[pos].type.vbi_type;
713
714 pos=tvp5150_read(c, reg+1)&0x0f;
715 if (pos<0x0f)
716 type|=regs[pos].type.vbi_type;
717
718 return type;
719}
713static int tvp5150_set_std(struct i2c_client *c, v4l2_std_id std) 720static int tvp5150_set_std(struct i2c_client *c, v4l2_std_id std)
714{ 721{
715 struct tvp5150 *decoder = i2c_get_clientdata(c); 722 struct tvp5150 *decoder = i2c_get_clientdata(c);
@@ -854,6 +861,69 @@ static int tvp5150_command(struct i2c_client *c,
854 *(v4l2_std_id *)arg = decoder->norm; 861 *(v4l2_std_id *)arg = decoder->norm;
855 break; 862 break;
856 863
864 case VIDIOC_G_SLICED_VBI_CAP:
865 {
866 struct v4l2_sliced_vbi_cap *cap = arg;
867 tvp5150_dbg(1, "VIDIOC_G_SLICED_VBI_CAP\n");
868
869 tvp5150_vbi_get_cap(vbi_ram_default, cap);
870 break;
871 }
872 case VIDIOC_S_FMT:
873 {
874 struct v4l2_format *fmt;
875 struct v4l2_sliced_vbi_format *svbi;
876 int i;
877
878 fmt = arg;
879 if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
880 return -EINVAL;
881 svbi = &fmt->fmt.sliced;
882 if (svbi->service_set != 0) {
883 for (i = 0; i <= 23; i++) {
884 svbi->service_lines[1][i] = 0;
885
886 svbi->service_lines[0][i]=tvp5150_set_vbi(c,
887 vbi_ram_default,
888 svbi->service_lines[0][i],0xf0,i,3);
889 }
890 /* Enables FIFO */
891 tvp5150_write(c, TVP5150_FIFO_OUT_CTRL,1);
892 } else {
893 /* Disables FIFO*/
894 tvp5150_write(c, TVP5150_FIFO_OUT_CTRL,0);
895
896 /* Disable Full Field */
897 tvp5150_write(c, TVP5150_FULL_FIELD_ENA, 0);
898
899 /* Disable Line modes */
900 for (i=TVP5150_LINE_MODE_INI; i<=TVP5150_LINE_MODE_END; i++)
901 tvp5150_write(c, i, 0xff);
902 }
903 break;
904 }
905 case VIDIOC_G_FMT:
906 {
907 struct v4l2_format *fmt;
908 struct v4l2_sliced_vbi_format *svbi;
909
910 int i, mask=0;
911
912 fmt = arg;
913 if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
914 return -EINVAL;
915 svbi = &fmt->fmt.sliced;
916 memset(svbi, 0, sizeof(*svbi));
917
918 for (i = 0; i <= 23; i++) {
919 svbi->service_lines[0][i]=tvp5150_get_vbi(c,
920 vbi_ram_default,i);
921 mask|=svbi->service_lines[0][i];
922 }
923 svbi->service_set=mask;
924 break;
925 }
926
857#ifdef CONFIG_VIDEO_ADV_DEBUG 927#ifdef CONFIG_VIDEO_ADV_DEBUG
858 case VIDIOC_INT_G_REGISTER: 928 case VIDIOC_INT_G_REGISTER:
859 { 929 {
@@ -878,6 +948,7 @@ static int tvp5150_command(struct i2c_client *c,
878 } 948 }
879#endif 949#endif
880 950
951 case VIDIOC_LOG_STATUS:
881 case DECODER_DUMP: 952 case DECODER_DUMP:
882 dump_reg(c); 953 dump_reg(c);
883 break; 954 break;
@@ -1097,7 +1168,7 @@ static int tvp5150_detect_client(struct i2c_adapter *adapter,
1097 1168
1098 rv = i2c_attach_client(c); 1169 rv = i2c_attach_client(c);
1099 1170
1100 core->norm = V4L2_STD_ALL; 1171 core->norm = V4L2_STD_ALL; /* Default is autodetect */
1101 core->input = 2; 1172 core->input = 2;
1102 core->enable = 1; 1173 core->enable = 1;
1103 core->bright = 32768; 1174 core->bright = 32768;
diff --git a/drivers/media/video/tvp5150_reg.h b/drivers/media/video/tvp5150_reg.h
index cd45c1ded786..4240043c0b2a 100644
--- a/drivers/media/video/tvp5150_reg.h
+++ b/drivers/media/video/tvp5150_reg.h
@@ -1,3 +1,10 @@
1/*
2 * tvp5150 - Texas Instruments TVP5150A/AM1 video decoder registers
3 *
4 * Copyright (c) 2005,2006 Mauro Carvalho Chehab (mchehab@infradead.org)
5 * This code is placed under the terms of the GNU General Public License v2
6 */
7
1#define TVP5150_VD_IN_SRC_SEL_1 0x00 /* Video input source selection #1 */ 8#define TVP5150_VD_IN_SRC_SEL_1 0x00 /* Video input source selection #1 */
2#define TVP5150_ANAL_CHL_CTL 0x01 /* Analog channel controls */ 9#define TVP5150_ANAL_CHL_CTL 0x01 /* Analog channel controls */
3#define TVP5150_OP_MODE_CTL 0x02 /* Operation mode controls */ 10#define TVP5150_OP_MODE_CTL 0x02 /* Operation mode controls */
@@ -64,49 +71,32 @@
64#define TVP5150_STATUS_REG_4 0x8b /* Status register #4 */ 71#define TVP5150_STATUS_REG_4 0x8b /* Status register #4 */
65#define TVP5150_STATUS_REG_5 0x8c /* Status register #5 */ 72#define TVP5150_STATUS_REG_5 0x8c /* Status register #5 */
66/* Reserved 8Dh-8Fh */ 73/* Reserved 8Dh-8Fh */
67#define TVP5150_CC_DATA_REG1 0x90 /* Closed caption data registers */ 74 /* Closed caption data registers */
68#define TVP5150_CC_DATA_REG2 0x91 /* Closed caption data registers */ 75#define TVP5150_CC_DATA_INI 0x90
69#define TVP5150_CC_DATA_REG3 0x92 /* Closed caption data registers */ 76#define TVP5150_CC_DATA_END 0x93
70#define TVP5150_CC_DATA_REG4 0x93 /* Closed caption data registers */ 77
71#define TVP5150_WSS_DATA_REG1 0X94 /* WSS data registers */ 78 /* WSS data registers */
72#define TVP5150_WSS_DATA_REG2 0X95 /* WSS data registers */ 79#define TVP5150_WSS_DATA_INI 0x94
73#define TVP5150_WSS_DATA_REG3 0X96 /* WSS data registers */ 80#define TVP5150_WSS_DATA_END 0x99
74#define TVP5150_WSS_DATA_REG4 0X97 /* WSS data registers */ 81
75#define TVP5150_WSS_DATA_REG5 0X98 /* WSS data registers */ 82/* VPS data registers */
76#define TVP5150_WSS_DATA_REG6 0X99 /* WSS data registers */ 83#define TVP5150_VPS_DATA_INI 0x9a
77#define TVP5150_VPS_DATA_REG1 0x9a /* VPS data registers */ 84#define TVP5150_VPS_DATA_END 0xa6
78#define TVP5150_VPS_DATA_REG2 0x9b /* VPS data registers */ 85
79#define TVP5150_VPS_DATA_REG3 0x9c /* VPS data registers */ 86/* VITC data registers */
80#define TVP5150_VPS_DATA_REG4 0x9d /* VPS data registers */ 87#define TVP5150_VITC_DATA_INI 0xa7
81#define TVP5150_VPS_DATA_REG5 0x9e /* VPS data registers */ 88#define TVP5150_VITC_DATA_END 0xaf
82#define TVP5150_VPS_DATA_REG6 0x9f /* VPS data registers */ 89
83#define TVP5150_VPS_DATA_REG7 0xa0 /* VPS data registers */
84#define TVP5150_VPS_DATA_REG8 0xa1 /* VPS data registers */
85#define TVP5150_VPS_DATA_REG9 0xa2 /* VPS data registers */
86#define TVP5150_VPS_DATA_REG10 0xa3 /* VPS data registers */
87#define TVP5150_VPS_DATA_REG11 0xa4 /* VPS data registers */
88#define TVP5150_VPS_DATA_REG12 0xa5 /* VPS data registers */
89#define TVP5150_VPS_DATA_REG13 0xa6 /* VPS data registers */
90#define TVP5150_VITC_DATA_REG1 0xa7 /* VITC data registers */
91#define TVP5150_VITC_DATA_REG2 0xa8 /* VITC data registers */
92#define TVP5150_VITC_DATA_REG3 0xa9 /* VITC data registers */
93#define TVP5150_VITC_DATA_REG4 0xaa /* VITC data registers */
94#define TVP5150_VITC_DATA_REG5 0xab /* VITC data registers */
95#define TVP5150_VITC_DATA_REG6 0xac /* VITC data registers */
96#define TVP5150_VITC_DATA_REG7 0xad /* VITC data registers */
97#define TVP5150_VITC_DATA_REG8 0xae /* VITC data registers */
98#define TVP5150_VITC_DATA_REG9 0xaf /* VITC data registers */
99#define TVP5150_VBI_FIFO_READ_DATA 0xb0 /* VBI FIFO read data */ 90#define TVP5150_VBI_FIFO_READ_DATA 0xb0 /* VBI FIFO read data */
100#define TVP5150_TELETEXT_FIL_1_1 0xb1 /* Teletext filter 1 */ 91
101#define TVP5150_TELETEXT_FIL_1_2 0xb2 /* Teletext filter 1 */ 92/* Teletext filter 1 */
102#define TVP5150_TELETEXT_FIL_1_3 0xb3 /* Teletext filter 1 */ 93#define TVP5150_TELETEXT_FIL1_INI 0xb1
103#define TVP5150_TELETEXT_FIL_1_4 0xb4 /* Teletext filter 1 */ 94#define TVP5150_TELETEXT_FIL1_END 0xb5
104#define TVP5150_TELETEXT_FIL_1_5 0xb5 /* Teletext filter 1 */ 95
105#define TVP5150_TELETEXT_FIL_2_1 0xb6 /* Teletext filter 2 */ 96/* Teletext filter 2 */
106#define TVP5150_TELETEXT_FIL_2_2 0xb7 /* Teletext filter 2 */ 97#define TVP5150_TELETEXT_FIL2_INI 0xb6
107#define TVP5150_TELETEXT_FIL_2_3 0xb8 /* Teletext filter 2 */ 98#define TVP5150_TELETEXT_FIL2_END 0xba
108#define TVP5150_TELETEXT_FIL_2_4 0xb9 /* Teletext filter 2 */ 99
109#define TVP5150_TELETEXT_FIL_2_5 0xba /* Teletext filter 2 */
110#define TVP5150_TELETEXT_FIL_ENA 0xbb /* Teletext filter enable */ 100#define TVP5150_TELETEXT_FIL_ENA 0xbb /* Teletext filter enable */
111/* Reserved BCh-BFh */ 101/* Reserved BCh-BFh */
112#define TVP5150_INT_STATUS_REG_A 0xc0 /* Interrupt status register A */ 102#define TVP5150_INT_STATUS_REG_A 0xc0 /* Interrupt status register A */
@@ -124,50 +114,11 @@
124#define TVP5150_PIX_ALIGN_REG_HIGH 0xcc /* Pixel alignment register high byte */ 114#define TVP5150_PIX_ALIGN_REG_HIGH 0xcc /* Pixel alignment register high byte */
125#define TVP5150_FIFO_OUT_CTRL 0xcd /* FIFO output control */ 115#define TVP5150_FIFO_OUT_CTRL 0xcd /* FIFO output control */
126/* Reserved CEh */ 116/* Reserved CEh */
127#define TVP5150_FULL_FIELD_ENA_1 0xcf /* Full field enable 1 */ 117#define TVP5150_FULL_FIELD_ENA 0xcf /* Full field enable */
128#define TVP5150_FULL_FIELD_ENA_2 0xd0 /* Full field enable 2 */ 118
129#define TVP5150_LINE_MODE_REG_1 0xd1 /* Line mode registers */ 119/* Line mode registers */
130#define TVP5150_LINE_MODE_REG_2 0xd2 /* Line mode registers */ 120#define TVP5150_LINE_MODE_INI 0xd0
131#define TVP5150_LINE_MODE_REG_3 0xd3 /* Line mode registers */ 121#define TVP5150_LINE_MODE_END 0xfb
132#define TVP5150_LINE_MODE_REG_4 0xd4 /* Line mode registers */ 122
133#define TVP5150_LINE_MODE_REG_5 0xd5 /* Line mode registers */
134#define TVP5150_LINE_MODE_REG_6 0xd6 /* Line mode registers */
135#define TVP5150_LINE_MODE_REG_7 0xd7 /* Line mode registers */
136#define TVP5150_LINE_MODE_REG_8 0xd8 /* Line mode registers */
137#define TVP5150_LINE_MODE_REG_9 0xd9 /* Line mode registers */
138#define TVP5150_LINE_MODE_REG_10 0xda /* Line mode registers */
139#define TVP5150_LINE_MODE_REG_11 0xdb /* Line mode registers */
140#define TVP5150_LINE_MODE_REG_12 0xdc /* Line mode registers */
141#define TVP5150_LINE_MODE_REG_13 0xdd /* Line mode registers */
142#define TVP5150_LINE_MODE_REG_14 0xde /* Line mode registers */
143#define TVP5150_LINE_MODE_REG_15 0xdf /* Line mode registers */
144#define TVP5150_LINE_MODE_REG_16 0xe0 /* Line mode registers */
145#define TVP5150_LINE_MODE_REG_17 0xe1 /* Line mode registers */
146#define TVP5150_LINE_MODE_REG_18 0xe2 /* Line mode registers */
147#define TVP5150_LINE_MODE_REG_19 0xe3 /* Line mode registers */
148#define TVP5150_LINE_MODE_REG_20 0xe4 /* Line mode registers */
149#define TVP5150_LINE_MODE_REG_21 0xe5 /* Line mode registers */
150#define TVP5150_LINE_MODE_REG_22 0xe6 /* Line mode registers */
151#define TVP5150_LINE_MODE_REG_23 0xe7 /* Line mode registers */
152#define TVP5150_LINE_MODE_REG_24 0xe8 /* Line mode registers */
153#define TVP5150_LINE_MODE_REG_25 0xe9 /* Line mode registers */
154#define TVP5150_LINE_MODE_REG_27 0xea /* Line mode registers */
155#define TVP5150_LINE_MODE_REG_28 0xeb /* Line mode registers */
156#define TVP5150_LINE_MODE_REG_29 0xec /* Line mode registers */
157#define TVP5150_LINE_MODE_REG_30 0xed /* Line mode registers */
158#define TVP5150_LINE_MODE_REG_31 0xee /* Line mode registers */
159#define TVP5150_LINE_MODE_REG_32 0xef /* Line mode registers */
160#define TVP5150_LINE_MODE_REG_33 0xf0 /* Line mode registers */
161#define TVP5150_LINE_MODE_REG_34 0xf1 /* Line mode registers */
162#define TVP5150_LINE_MODE_REG_35 0xf2 /* Line mode registers */
163#define TVP5150_LINE_MODE_REG_36 0xf3 /* Line mode registers */
164#define TVP5150_LINE_MODE_REG_37 0xf4 /* Line mode registers */
165#define TVP5150_LINE_MODE_REG_38 0xf5 /* Line mode registers */
166#define TVP5150_LINE_MODE_REG_39 0xf6 /* Line mode registers */
167#define TVP5150_LINE_MODE_REG_40 0xf7 /* Line mode registers */
168#define TVP5150_LINE_MODE_REG_41 0xf8 /* Line mode registers */
169#define TVP5150_LINE_MODE_REG_42 0xf9 /* Line mode registers */
170#define TVP5150_LINE_MODE_REG_43 0xfa /* Line mode registers */
171#define TVP5150_LINE_MODE_REG_44 0xfb /* Line mode registers */
172#define TVP5150_FULL_FIELD_MODE_REG 0xfc /* Full field mode register */ 123#define TVP5150_FULL_FIELD_MODE_REG 0xfc /* Full field mode register */
173/* Reserved FDh-FFh */ 124/* Reserved FDh-FFh */
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index cd2c4475525e..95a6e47c99f1 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -97,7 +97,7 @@ int v4l2_video_std_construct(struct v4l2_standard *vs,
97 memset(vs, 0, sizeof(struct v4l2_standard)); 97 memset(vs, 0, sizeof(struct v4l2_standard));
98 vs->index = index; 98 vs->index = index;
99 vs->id = id; 99 vs->id = id;
100 if (id & (V4L2_STD_NTSC | V4L2_STD_PAL_M)) { 100 if (id & V4L2_STD_525_60) {
101 vs->frameperiod.numerator = 1001; 101 vs->frameperiod.numerator = 1001;
102 vs->frameperiod.denominator = 30000; 102 vs->frameperiod.denominator = 30000;
103 vs->framelines = 525; 103 vs->framelines = 525;
@@ -110,7 +110,6 @@ int v4l2_video_std_construct(struct v4l2_standard *vs,
110 return 0; 110 return 0;
111} 111}
112 112
113
114/* ----------------------------------------------------------------- */ 113/* ----------------------------------------------------------------- */
115/* priority handling */ 114/* priority handling */
116 115
@@ -171,7 +170,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority *local)
171 170
172 171
173/* ----------------------------------------------------------------- */ 172/* ----------------------------------------------------------------- */
174/* some arrays for pretty-printing debug messages */ 173/* some arrays for pretty-printing debug messages of enum types */
175 174
176char *v4l2_field_names[] = { 175char *v4l2_field_names[] = {
177 [V4L2_FIELD_ANY] = "any", 176 [V4L2_FIELD_ANY] = "any",
@@ -192,6 +191,14 @@ char *v4l2_type_names[] = {
192 [V4L2_BUF_TYPE_VBI_OUTPUT] = "vbi-out", 191 [V4L2_BUF_TYPE_VBI_OUTPUT] = "vbi-out",
193}; 192};
194 193
194static char *v4l2_memory_names[] = {
195 [V4L2_MEMORY_MMAP] = "mmap",
196 [V4L2_MEMORY_USERPTR] = "userptr",
197 [V4L2_MEMORY_OVERLAY] = "overlay",
198};
199
200#define prt_names(a,arr) ((((a)>=0)&&((a)<ARRAY_SIZE(arr)))?arr[a]:"unknown")
201
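/* prt_names() bounds-checks the lookup so holes in the designated-initializer
 * tables and out-of-range values print "unknown" rather than reading past the
 * end of the array, e.g. (illustrative):
 *
 *	printk("type=%s\n", prt_names(p->type, v4l2_type_names));
 */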
195/* ------------------------------------------------------------------ */ 202/* ------------------------------------------------------------------ */
196/* debug help functions */ 203/* debug help functions */
197 204
@@ -324,6 +331,15 @@ static const char *v4l2_int_ioctls[] = {
324}; 331};
325#define V4L2_INT_IOCTLS ARRAY_SIZE(v4l2_int_ioctls) 332#define V4L2_INT_IOCTLS ARRAY_SIZE(v4l2_int_ioctls)
326 333
334static void v4l_print_pix_fmt (char *s, struct v4l2_pix_format *fmt)
335{
336 printk ("%s: width=%d, height=%d, format=%d, field=%s, "
337 "bytesperline=%d sizeimage=%d, colorspace=%d\n", s,
338 fmt->width,fmt->height,fmt->pixelformat,
339 prt_names(fmt->field,v4l2_field_names),
340 fmt->bytesperline,fmt->sizeimage,fmt->colorspace);
341};
342
327/* Common ioctl debug function. This function can be used by 343/* Common ioctl debug function. This function can be used by
328 external ioctl messages as well as internal V4L ioctl */ 344 external ioctl messages as well as internal V4L ioctl */
329void v4l_printk_ioctl(unsigned int cmd) 345void v4l_printk_ioctl(unsigned int cmd)
@@ -362,6 +378,541 @@ void v4l_printk_ioctl(unsigned int cmd)
362 } 378 }
363} 379}
364 380
381/* Common ioctl debug function. This function can be used by
382 external ioctl messages as well as internal V4L ioctl and its
383 arguments */
384void v4l_printk_ioctl_arg(char *s,unsigned int cmd, void *arg)
385{
386 printk("%s", s);
387 printk(": ");
388 v4l_printk_ioctl(cmd);
389 switch (cmd) {
390 case VIDIOC_INT_G_CHIP_IDENT:
391 {
392 enum v4l2_chip_ident *p=arg;
393 printk ("%s: chip ident=%d\n", s, *p);
394 break;
395 }
396 case VIDIOC_G_PRIORITY:
397 case VIDIOC_S_PRIORITY:
398 {
399 enum v4l2_priority *p=arg;
400 printk ("%s: priority=%d\n", s, *p);
401 break;
402 }
403 case VIDIOC_INT_S_TUNER_MODE:
404 {
405 enum v4l2_tuner_type *p=arg;
406 printk ("%s: tuner type=%d\n", s, *p);
407 break;
408 }
409 case DECODER_SET_VBI_BYPASS:
410 case DECODER_ENABLE_OUTPUT:
411 case DECODER_GET_STATUS:
412 case DECODER_SET_OUTPUT:
413 case DECODER_SET_INPUT:
414 case DECODER_SET_GPIO:
415 case DECODER_SET_NORM:
416 case VIDIOCCAPTURE:
417 case VIDIOCSYNC:
418 case VIDIOCSWRITEMODE:
419 case TUNER_SET_TYPE_ADDR:
420 case TUNER_SET_STANDBY:
421 case TDA9887_SET_CONFIG:
422 case AUDC_SET_INPUT:
423 case VIDIOC_OVERLAY_OLD:
424 case VIDIOC_STREAMOFF:
425 case VIDIOC_G_OUTPUT:
426 case VIDIOC_S_OUTPUT:
427 case VIDIOC_STREAMON:
428 case VIDIOC_G_INPUT:
429 case VIDIOC_OVERLAY:
430 case VIDIOC_S_INPUT:
431 {
432 int *p=arg;
433 printk ("%s: value=%d\n", s, *p);
434 break;
435 }
436 case MSP_SET_MATRIX:
437 {
438 struct msp_matrix *p=arg;
439 printk ("%s: input=%d, output=%d\n", s, p->input, p->output);
440 break;
441 }
442 case VIDIOC_G_AUDIO:
443 case VIDIOC_S_AUDIO:
444 case VIDIOC_ENUMAUDIO:
445 case VIDIOC_G_AUDIO_OLD:
446 {
447 struct v4l2_audio *p=arg;
448
449 printk ("%s: index=%d, name=%s, capability=%d, mode=%d\n",
450 s,p->index, p->name,p->capability, p->mode);
451 break;
452 }
453 case VIDIOC_G_AUDOUT:
454 case VIDIOC_S_AUDOUT:
455 case VIDIOC_ENUMAUDOUT:
456 case VIDIOC_G_AUDOUT_OLD:
457 {
458 struct v4l2_audioout *p=arg;
459 printk ("%s: index=%d, name=%s, capability=%d, mode=%d\n", s,
460 p->index, p->name, p->capability,p->mode);
461 break;
462 }
463 case VIDIOC_QBUF:
464 case VIDIOC_DQBUF:
465 case VIDIOC_QUERYBUF:
466 {
467 struct v4l2_buffer *p=arg;
468 struct v4l2_timecode *tc=&p->timecode;
469 printk ("%s: %02ld:%02d:%02d.%08ld index=%d, type=%s, "
470 "bytesused=%d, flags=0x%08d, "
471 "field=%0d, sequence=%d, memory=%s, offset/userptr=0x%08lx\n",
472 s,
473 (p->timestamp.tv_sec/3600),
474 (int)(p->timestamp.tv_sec/60)%60,
475 (int)(p->timestamp.tv_sec%60),
476 p->timestamp.tv_usec,
477 p->index,
478 prt_names(p->type,v4l2_type_names),
479 p->bytesused,p->flags,
480 p->field,p->sequence,
481 prt_names(p->memory,v4l2_memory_names),
482 p->m.userptr);
483 printk ("%s: timecode= %02d:%02d:%02d type=%d, "
484 "flags=0x%08d, frames=%d, userbits=0x%08x",
485 s,tc->hours,tc->minutes,tc->seconds,
486 tc->type, tc->flags, tc->frames, (__u32) tc->userbits);
487 break;
488 }
489 case VIDIOC_QUERYCAP:
490 {
491 struct v4l2_capability *p=arg;
492 printk ("%s: driver=%s, card=%s, bus=%s, version=%d, "
493 "capabilities=%d\n", s,
494 p->driver,p->card,p->bus_info,
495 p->version,
496 p->capabilities);
497 break;
498 }
499 case VIDIOC_G_CTRL:
500 case VIDIOC_S_CTRL:
501 case VIDIOC_S_CTRL_OLD:
502 {
503 struct v4l2_control *p=arg;
504 printk ("%s: id=%d, value=%d\n", s, p->id, p->value);
505 break;
506 }
507 case VIDIOC_G_CROP:
508 case VIDIOC_S_CROP:
509 {
510 struct v4l2_crop *p=arg;
511 /*FIXME: Should also show rect structs */
512 printk ("%s: type=%d\n", s, p->type);
513 break;
514 }
515 case VIDIOC_CROPCAP:
516 case VIDIOC_CROPCAP_OLD:
517 {
518 struct v4l2_cropcap *p=arg;
519 /*FIXME: Should also show rect structs */
520 printk ("%s: type=%d\n", s, p->type);
521 break;
522 }
523 case VIDIOC_INT_DECODE_VBI_LINE:
524 {
525 struct v4l2_decode_vbi_line *p=arg;
526 printk ("%s: is_second_field=%d, ptr=0x%08lx, line=%d, "
527 "type=%d\n", s,
528 p->is_second_field,(unsigned long)p->p,p->line,p->type);
529 break;
530 }
531 case VIDIOC_ENUM_FMT:
532 {
533 struct v4l2_fmtdesc *p=arg;
534 printk ("%s: index=%d, type=%d, flags=%d, description=%s,"
535 " pixelformat=%d\n", s,
536 p->index, p->type, p->flags,p->description,
537 p->pixelformat);
538
539 break;
540 }
541 case VIDIOC_G_FMT:
542 case VIDIOC_S_FMT:
543 case VIDIOC_TRY_FMT:
544 {
545 struct v4l2_format *p=arg;
546 printk ("%s: type=%s\n", s,
547 prt_names(p->type,v4l2_type_names));
548 switch (p->type) {
549 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
550 v4l_print_pix_fmt (s, &p->fmt.pix);
551 break;
552 default:
553 break;
554 }
555 break;
 }
556 case VIDIOC_G_FBUF:
557 case VIDIOC_S_FBUF:
558 {
559 struct v4l2_framebuffer *p=arg;
560 printk ("%s: capability=%d, flags=%d, base=0x%08lx\n", s,
561 p->capability,p->flags, (unsigned long)p->base);
562 v4l_print_pix_fmt (s, &p->fmt);
563 break;
564 }
565 case VIDIOC_G_FREQUENCY:
566 case VIDIOC_S_FREQUENCY:
567 {
568 struct v4l2_frequency *p=arg;
569 printk ("%s: tuner=%d, type=%d, frequency=%d\n", s,
570 p->tuner,p->type,p->frequency);
571 break;
572 }
573 case VIDIOC_ENUMINPUT:
574 {
575 struct v4l2_input *p=arg;
576 printk ("%s: index=%d, name=%s, type=%d, audioset=%d, "
577 "tuner=%d, std=%lld, status=%d\n", s,
578 p->index,p->name,p->type,p->audioset,
579 p->tuner,p->std,
580 p->status);
581 break;
582 }
583 case VIDIOC_G_JPEGCOMP:
584 case VIDIOC_S_JPEGCOMP:
585 {
586 struct v4l2_jpegcompression *p=arg;
587 printk ("%s: quality=%d, APPn=%d, APP_len=%d, COM_len=%d,"
588 " jpeg_markers=%d\n", s,
589 p->quality,p->APPn,p->APP_len,
590 p->COM_len,p->jpeg_markers);
591 break;
592 }
593 case VIDIOC_G_MODULATOR:
594 case VIDIOC_S_MODULATOR:
595 {
596 struct v4l2_modulator *p=arg;
597 printk ("%s: index=%d, name=%s, capability=%d, rangelow=%d,"
598 " rangehigh=%d, txsubchans=%d\n", s,
599 p->index, p->name,p->capability,p->rangelow,
600 p->rangehigh,p->txsubchans);
601 break;
602 }
603 case VIDIOC_G_MPEGCOMP:
604 case VIDIOC_S_MPEGCOMP:
605 {
606 struct v4l2_mpeg_compression *p=arg;
607 /*FIXME: Several fields not shown */
608 printk ("%s: ts_pid_pmt=%d, ts_pid_audio=%d, ts_pid_video=%d, "
609 "ts_pid_pcr=%d, ps_size=%d, au_sample_rate=%d, "
610 "au_pesid=%c, vi_frame_rate=%d, vi_frames_per_gop=%d, "
611 "vi_bframes_count=%d, vi_pesid=%c\n", s,
612 p->ts_pid_pmt,p->ts_pid_audio, p->ts_pid_video,
613 p->ts_pid_pcr, p->ps_size, p->au_sample_rate,
614 p->au_pesid, p->vi_frame_rate,
615 p->vi_frames_per_gop, p->vi_bframes_count,
616 p->vi_pesid);
617 break;
618 }
619 case VIDIOC_ENUMOUTPUT:
620 {
621 struct v4l2_output *p=arg;
622 printk ("%s: index=%d, name=%s,type=%d, audioset=%d, "
623 "modulator=%d, std=%lld\n",
624 s,p->index,p->name,p->type,p->audioset,
625 p->modulator,p->std);
626 break;
627 }
628 case VIDIOC_QUERYCTRL:
629 {
630 struct v4l2_queryctrl *p=arg;
631 printk ("%s: id=%d, type=%d, name=%s, min/max=%d/%d,"
632 " step=%d, default=%d, flags=0x%08x\n", s,
633 p->id,p->type,p->name,p->minimum,p->maximum,
634 p->step,p->default_value,p->flags);
635 break;
636 }
637 case VIDIOC_QUERYMENU:
638 {
639 struct v4l2_querymenu *p=arg;
640 printk ("%s: id=%d, index=%d, name=%s\n", s,
641 p->id,p->index,p->name);
642 break;
643 }
644 case VIDIOC_INT_G_REGISTER:
645 case VIDIOC_INT_S_REGISTER:
646 {
647 struct v4l2_register *p=arg;
648 printk ("%s: i2c_id=%d, reg=%lu, val=%d\n", s,
649 p->i2c_id,p->reg,p->val);
650
651 break;
652 }
653 case VIDIOC_REQBUFS:
654 {
655 struct v4l2_requestbuffers *p=arg;
656 printk ("%s: count=%d, type=%s, memory=%s\n", s,
657 p->count,
658 prt_names(p->type,v4l2_type_names),
659 prt_names(p->memory,v4l2_memory_names));
660 break;
661 }
662 case VIDIOC_INT_S_AUDIO_ROUTING:
663 case VIDIOC_INT_S_VIDEO_ROUTING:
664 case VIDIOC_INT_G_AUDIO_ROUTING:
665 case VIDIOC_INT_G_VIDEO_ROUTING:
666 {
667 struct v4l2_routing *p=arg;
668 printk ("%s: input=%d, output=%d\n", s, p->input, p->output);
669 break;
670 }
671 case VIDIOC_G_SLICED_VBI_CAP:
672 {
673 struct v4l2_sliced_vbi_cap *p=arg;
674 printk ("%s: service_set=%d\n", s,
675 p->service_set);
676 break;
677 }
678 case VIDIOC_INT_S_VBI_DATA:
679 case VIDIOC_INT_G_VBI_DATA:
680 {
681 struct v4l2_sliced_vbi_data *p=arg;
682 printk ("%s: id=%d, field=%d, line=%d\n", s,
683 p->id, p->field, p->line);
684 break;
685 }
686 case VIDIOC_ENUMSTD:
687 {
688 struct v4l2_standard *p=arg;
689 printk ("%s: index=%d, id=%lld, name=%s, fps=%d/%d, framelines=%d\n", s,
690 p->index, p->id, p->name,
691 p->frameperiod.numerator,
692 p->frameperiod.denominator,
693 p->framelines);
694
695 break;
696 }
697 case VIDIOC_G_PARM:
698 case VIDIOC_S_PARM:
699 case VIDIOC_S_PARM_OLD:
700 {
701 struct v4l2_streamparm *p=arg;
702 printk ("%s: type=%d\n", s, p->type);
703
704 break;
705 }
706 case VIDIOC_G_TUNER:
707 case VIDIOC_S_TUNER:
708 {
709 struct v4l2_tuner *p=arg;
710 printk ("%s: index=%d, name=%s, type=%d, capability=%d, "
711 "rangelow=%d, rangehigh=%d, signal=%d, afc=%d, "
712 "rxsubchans=%d, audmode=%d\n", s,
713 p->index, p->name, p->type,
714 p->capability, p->rangelow,p->rangehigh,
715 p->signal, p->afc,
716 p->rxsubchans, p->audmode);
717 break;
718 }
719 case VIDIOCGVBIFMT:
720 case VIDIOCSVBIFMT:
721 {
722 struct vbi_format *p=arg;
723 printk ("%s: sampling_rate=%d, samples_per_line=%d, "
724 "sample_format=%d, start=%d/%d, count=%d/%d, flags=%d\n", s,
725 p->sampling_rate,p->samples_per_line,
726 p->sample_format,p->start[0],p->start[1],
727 p->count[0],p->count[1],p->flags);
728 break;
729 }
730 case VIDIOCGAUDIO:
731 case VIDIOCSAUDIO:
732 {
733 struct video_audio *p=arg;
734 printk ("%s: audio=%d, volume=%d, bass=%d, treble=%d, "
735 "flags=%d, name=%s, mode=%d, balance=%d, step=%d\n",
736 s,p->audio,p->volume,p->bass, p->treble,
737 p->flags,p->name,p->mode,p->balance,p->step);
738 break;
739 }
740 case VIDIOCGFBUF:
741 case VIDIOCSFBUF:
742 {
743 struct video_buffer *p=arg;
744 printk ("%s: base=%08lx, height=%d, width=%d, depth=%d, "
745 "bytesperline=%d\n", s,
746 (unsigned long) p->base, p->height, p->width,
747 p->depth,p->bytesperline);
748 break;
749 }
750 case VIDIOCGCAP:
751 {
752 struct video_capability *p=arg;
753 printk ("%s: name=%s, type=%d, channels=%d, audios=%d, "
754 "maxwidth=%d, maxheight=%d, minwidth=%d, minheight=%d\n",
755 s,p->name,p->type,p->channels,p->audios,
756 p->maxwidth,p->maxheight,p->minwidth,
757 p->minheight);
758
759 break;
760 }
761 case VIDIOCGCAPTURE:
762 case VIDIOCSCAPTURE:
763 {
764 struct video_capture *p=arg;
765 printk ("%s: x=%d, y=%d, width=%d, height=%d, decimation=%d,"
766 " flags=%d\n", s,
767 p->x, p->y,p->width, p->height,
768 p->decimation,p->flags);
769 break;
770 }
771 case VIDIOCGCHAN:
772 case VIDIOCSCHAN:
773 {
774 struct video_channel *p=arg;
775 printk ("%s: channel=%d, name=%s, tuners=%d, flags=%d, "
776 "type=%d, norm=%d\n", s,
777 p->channel,p->name,p->tuners,
778 p->flags,p->type,p->norm);
779
780 break;
781 }
782 case VIDIOCSMICROCODE:
783 {
784 struct video_code *p=arg;
785 printk ("%s: loadwhat=%s, datasize=%d\n", s,
786 p->loadwhat,p->datasize);
787 break;
788 }
789 case DECODER_GET_CAPABILITIES:
790 {
791 struct video_decoder_capability *p=arg;
792 printk ("%s: flags=%d, inputs=%d, outputs=%d\n", s,
793 p->flags,p->inputs,p->outputs);
794 break;
795 }
796 case DECODER_INIT:
797 {
798 struct video_decoder_init *p=arg;
799 printk ("%s: len=%c\n", s, p->len);
800 break;
801 }
802 case VIDIOCGPLAYINFO:
803 {
804 struct video_info *p=arg;
805 printk ("%s: frame_count=%d, h_size=%d, v_size=%d, "
806 "smpte_timecode=%d, picture_type=%d, "
807 "temporal_reference=%d, user_data=%s\n", s,
808 p->frame_count, p->h_size,
809 p->v_size, p->smpte_timecode,
810 p->picture_type, p->temporal_reference,
811 p->user_data);
812 break;
813 }
814 case VIDIOCKEY:
815 {
816 struct video_key *p=arg;
817 printk ("%s: key=%s, flags=%d\n", s,
818 p->key, p->flags);
819 break;
820 }
821 case VIDIOCGMBUF:
822 {
823 struct video_mbuf *p=arg;
824 printk ("%s: size=%d, frames=%d, offsets=0x%08lx\n", s,
825 p->size,
826 p->frames,
827 (unsigned long)p->offsets);
828 break;
829 }
830 case VIDIOCMCAPTURE:
831 {
832 struct video_mmap *p=arg;
833 printk ("%s: frame=%d, height=%d, width=%d, format=%d\n", s,
834 p->frame,
835 p->height, p->width,
836 p->format);
837 break;
838 }
839 case VIDIOCGPICT:
840 case VIDIOCSPICT:
841 case DECODER_SET_PICTURE:
842 {
843 struct video_picture *p=arg;
844
845 printk ("%s: brightness=%d, hue=%d, colour=%d, contrast=%d,"
846 " whiteness=%d, depth=%d, palette=%d\n", s,
847 p->brightness, p->hue, p->colour,
848 p->contrast, p->whiteness, p->depth,
849 p->palette);
850 break;
851 }
852 case VIDIOCSPLAYMODE:
853 {
854 struct video_play_mode *p=arg;
855 printk ("%s: mode=%d, p1=%d, p2=%d\n", s,
856 p->mode,p->p1,p->p2);
857 break;
858 }
859 case VIDIOCGTUNER:
860 case VIDIOCSTUNER:
861 {
862 struct video_tuner *p=arg;
863 printk ("%s: tuner=%d, name=%s, rangelow=%ld, rangehigh=%ld, "
864 "flags=%d, mode=%d, signal=%d\n", s,
865 p->tuner, p->name,p->rangelow, p->rangehigh,
866 p->flags,p->mode, p->signal);
867 break;
868 }
869 case VIDIOCGUNIT:
870 {
871 struct video_unit *p=arg;
872 printk ("%s: video=%d, vbi=%d, radio=%d, audio=%d, "
873 "teletext=%d\n", s,
874 p->video,p->vbi,p->radio,p->audio,p->teletext);
875 break;
876 }
877 case VIDIOCGWIN:
878 case VIDIOCSWIN:
879 {
880 struct video_window *p=arg;
881 printk ("%s: x=%d, y=%d, width=%d, height=%d, chromakey=%d,"
882 " flags=%d, clipcount=%d\n", s,
883 p->x, p->y,p->width, p->height,
884 p->chromakey,p->flags,
885 p->clipcount);
886 break;
887 }
888 case VIDIOC_INT_AUDIO_CLOCK_FREQ:
889 case VIDIOC_INT_I2S_CLOCK_FREQ:
890 case VIDIOC_INT_S_STANDBY:
891 {
892 u32 *p=arg;
893
894 printk ("%s: value=%d\n", s, *p);
895 break;
896 }
897 case VIDIOCGFREQ:
898 case VIDIOCSFREQ:
899 {
900 unsigned long *p=arg;
901 printk ("%s: value=%lu\n", s, *p);
902 break;
903 }
904 case VIDIOC_G_STD:
905 case VIDIOC_S_STD:
906 case VIDIOC_QUERYSTD:
907 {
908 v4l2_std_id *p=arg;
909
910 printk ("%s: value=%llu\n", s, *p);
911 break;
912 }
913 }
914}
915
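/* A hypothetical caller (names illustrative, not part of this patch): a
 * driver can dump both the ioctl and its decoded argument from its ioctl
 * entry point before dispatching the command:
 *
 *	if (debug > 1)
 *		v4l_printk_ioctl_arg("mydrv", cmd, arg);
 */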
365/* ----------------------------------------------------------------- */ 916/* ----------------------------------------------------------------- */
366 917
367EXPORT_SYMBOL(v4l2_video_std_construct); 918EXPORT_SYMBOL(v4l2_video_std_construct);
@@ -376,6 +927,7 @@ EXPORT_SYMBOL(v4l2_prio_check);
376EXPORT_SYMBOL(v4l2_field_names); 927EXPORT_SYMBOL(v4l2_field_names);
377EXPORT_SYMBOL(v4l2_type_names); 928EXPORT_SYMBOL(v4l2_type_names);
378EXPORT_SYMBOL(v4l_printk_ioctl); 929EXPORT_SYMBOL(v4l_printk_ioctl);
930EXPORT_SYMBOL(v4l_printk_ioctl_arg);
379 931
380/* 932/*
381 * Local variables: 933 * Local variables:
diff --git a/drivers/media/video/video-buf-dvb.c b/drivers/media/video/video-buf-dvb.c
index 0a4004a4393c..caf3e7e2f219 100644
--- a/drivers/media/video/video-buf-dvb.c
+++ b/drivers/media/video/video-buf-dvb.c
@@ -96,7 +96,7 @@ static int videobuf_dvb_start_feed(struct dvb_demux_feed *feed)
96 if (!demux->dmx.frontend) 96 if (!demux->dmx.frontend)
97 return -EINVAL; 97 return -EINVAL;
98 98
99 down(&dvb->lock); 99 mutex_lock(&dvb->lock);
100 dvb->nfeeds++; 100 dvb->nfeeds++;
101 rc = dvb->nfeeds; 101 rc = dvb->nfeeds;
102 102
@@ -110,7 +110,7 @@ static int videobuf_dvb_start_feed(struct dvb_demux_feed *feed)
110 } 110 }
111 111
112out: 112out:
113 up(&dvb->lock); 113 mutex_unlock(&dvb->lock);
114 return rc; 114 return rc;
115} 115}
116 116
@@ -120,14 +120,14 @@ static int videobuf_dvb_stop_feed(struct dvb_demux_feed *feed)
120 struct videobuf_dvb *dvb = demux->priv; 120 struct videobuf_dvb *dvb = demux->priv;
121 int err = 0; 121 int err = 0;
122 122
123 down(&dvb->lock); 123 mutex_lock(&dvb->lock);
124 dvb->nfeeds--; 124 dvb->nfeeds--;
125 if (0 == dvb->nfeeds && NULL != dvb->thread) { 125 if (0 == dvb->nfeeds && NULL != dvb->thread) {
126 // FIXME: cx8802_cancel_buffers(dev); 126 // FIXME: cx8802_cancel_buffers(dev);
127 err = kthread_stop(dvb->thread); 127 err = kthread_stop(dvb->thread);
128 dvb->thread = NULL; 128 dvb->thread = NULL;
129 } 129 }
130 up(&dvb->lock); 130 mutex_unlock(&dvb->lock);
131 return err; 131 return err;
132} 132}
133 133
@@ -139,7 +139,7 @@ int videobuf_dvb_register(struct videobuf_dvb *dvb,
139{ 139{
140 int result; 140 int result;
141 141
142 init_MUTEX(&dvb->lock); 142 mutex_init(&dvb->lock);
143 143
144 /* register adapter */ 144 /* register adapter */
145 result = dvb_register_adapter(&dvb->adapter, dvb->name, module); 145 result = dvb_register_adapter(&dvb->adapter, dvb->name, module);
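/* The locking changes above and in the remaining files of this patch all
 * follow one pattern: a struct semaphore used purely for mutual exclusion
 * becomes a struct mutex, sketched as:
 *
 *	-	struct semaphore lock;		+	struct mutex lock;
 *	-	init_MUTEX(&x->lock);		+	mutex_init(&x->lock);
 *	-	down(&x->lock);			+	mutex_lock(&x->lock);
 *	-	up(&x->lock);			+	mutex_unlock(&x->lock);
 */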
diff --git a/drivers/media/video/video-buf.c b/drivers/media/video/video-buf.c
index 9ef477523d27..87e937581d5a 100644
--- a/drivers/media/video/video-buf.c
+++ b/drivers/media/video/video-buf.c
@@ -59,8 +59,7 @@ videobuf_vmalloc_to_sg(unsigned char *virt, int nr_pages)
59 pg = vmalloc_to_page(virt); 59 pg = vmalloc_to_page(virt);
60 if (NULL == pg) 60 if (NULL == pg)
61 goto err; 61 goto err;
62 if (PageHighMem(pg)) 62 BUG_ON(PageHighMem(pg));
63 BUG();
64 sglist[i].page = pg; 63 sglist[i].page = pg;
65 sglist[i].length = PAGE_SIZE; 64 sglist[i].length = PAGE_SIZE;
66 } 65 }
@@ -385,7 +384,7 @@ void videobuf_queue_init(struct videobuf_queue* q,
385 q->ops = ops; 384 q->ops = ops;
386 q->priv_data = priv; 385 q->priv_data = priv;
387 386
388 init_MUTEX(&q->lock); 387 mutex_init(&q->lock);
389 INIT_LIST_HEAD(&q->stream); 388 INIT_LIST_HEAD(&q->stream);
390} 389}
391 390
@@ -428,7 +427,7 @@ videobuf_queue_is_busy(struct videobuf_queue *q)
428void 427void
429videobuf_queue_cancel(struct videobuf_queue *q) 428videobuf_queue_cancel(struct videobuf_queue *q)
430{ 429{
431 unsigned long flags; 430 unsigned long flags=0;
432 int i; 431 int i;
433 432
434 /* remove queued buffers from list */ 433 /* remove queued buffers from list */
@@ -549,7 +548,7 @@ videobuf_reqbufs(struct videobuf_queue *q,
549 if (!list_empty(&q->stream)) 548 if (!list_empty(&q->stream))
550 return -EBUSY; 549 return -EBUSY;
551 550
552 down(&q->lock); 551 mutex_lock(&q->lock);
553 count = req->count; 552 count = req->count;
554 if (count > VIDEO_MAX_FRAME) 553 if (count > VIDEO_MAX_FRAME)
555 count = VIDEO_MAX_FRAME; 554 count = VIDEO_MAX_FRAME;
@@ -566,7 +565,7 @@ videobuf_reqbufs(struct videobuf_queue *q,
566 req->count = count; 565 req->count = count;
567 566
568 done: 567 done:
569 up(&q->lock); 568 mutex_unlock(&q->lock);
570 return retval; 569 return retval;
571} 570}
572 571
@@ -589,10 +588,10 @@ videobuf_qbuf(struct videobuf_queue *q,
589{ 588{
590 struct videobuf_buffer *buf; 589 struct videobuf_buffer *buf;
591 enum v4l2_field field; 590 enum v4l2_field field;
592 unsigned long flags; 591 unsigned long flags=0;
593 int retval; 592 int retval;
594 593
595 down(&q->lock); 594 mutex_lock(&q->lock);
596 retval = -EBUSY; 595 retval = -EBUSY;
597 if (q->reading) 596 if (q->reading)
598 goto done; 597 goto done;
@@ -652,7 +651,7 @@ videobuf_qbuf(struct videobuf_queue *q,
652 retval = 0; 651 retval = 0;
653 652
654 done: 653 done:
655 up(&q->lock); 654 mutex_unlock(&q->lock);
656 return retval; 655 return retval;
657} 656}
658 657
@@ -663,7 +662,7 @@ videobuf_dqbuf(struct videobuf_queue *q,
663 struct videobuf_buffer *buf; 662 struct videobuf_buffer *buf;
664 int retval; 663 int retval;
665 664
666 down(&q->lock); 665 mutex_lock(&q->lock);
667 retval = -EBUSY; 666 retval = -EBUSY;
668 if (q->reading) 667 if (q->reading)
669 goto done; 668 goto done;
@@ -693,7 +692,7 @@ videobuf_dqbuf(struct videobuf_queue *q,
693 videobuf_status(b,buf,q->type); 692 videobuf_status(b,buf,q->type);
694 693
695 done: 694 done:
696 up(&q->lock); 695 mutex_unlock(&q->lock);
697 return retval; 696 return retval;
698} 697}
699 698
@@ -701,10 +700,10 @@ int videobuf_streamon(struct videobuf_queue *q)
701{ 700{
702 struct videobuf_buffer *buf; 701 struct videobuf_buffer *buf;
703 struct list_head *list; 702 struct list_head *list;
704 unsigned long flags; 703 unsigned long flags=0;
705 int retval; 704 int retval;
706 705
707 down(&q->lock); 706 mutex_lock(&q->lock);
708 retval = -EBUSY; 707 retval = -EBUSY;
709 if (q->reading) 708 if (q->reading)
710 goto done; 709 goto done;
@@ -721,7 +720,7 @@ int videobuf_streamon(struct videobuf_queue *q)
721 spin_unlock_irqrestore(q->irqlock,flags); 720 spin_unlock_irqrestore(q->irqlock,flags);
722 721
723 done: 722 done:
724 up(&q->lock); 723 mutex_unlock(&q->lock);
725 return retval; 724 return retval;
726} 725}
727 726
@@ -729,7 +728,7 @@ int videobuf_streamoff(struct videobuf_queue *q)
729{ 728{
730 int retval = -EINVAL; 729 int retval = -EINVAL;
731 730
732 down(&q->lock); 731 mutex_lock(&q->lock);
733 if (!q->streaming) 732 if (!q->streaming)
734 goto done; 733 goto done;
735 videobuf_queue_cancel(q); 734 videobuf_queue_cancel(q);
@@ -737,7 +736,7 @@ int videobuf_streamoff(struct videobuf_queue *q)
737 retval = 0; 736 retval = 0;
738 737
739 done: 738 done:
740 up(&q->lock); 739 mutex_unlock(&q->lock);
741 return retval; 740 return retval;
742} 741}
743 742
@@ -746,7 +745,7 @@ videobuf_read_zerocopy(struct videobuf_queue *q, char __user *data,
746 size_t count, loff_t *ppos) 745 size_t count, loff_t *ppos)
747{ 746{
748 enum v4l2_field field; 747 enum v4l2_field field;
749 unsigned long flags; 748 unsigned long flags=0;
750 int retval; 749 int retval;
751 750
752 /* setup stuff */ 751 /* setup stuff */
@@ -788,11 +787,11 @@ ssize_t videobuf_read_one(struct videobuf_queue *q,
788 int nonblocking) 787 int nonblocking)
789{ 788{
790 enum v4l2_field field; 789 enum v4l2_field field;
791 unsigned long flags; 790 unsigned long flags=0;
792 unsigned size, nbufs, bytes; 791 unsigned size, nbufs, bytes;
793 int retval; 792 int retval;
794 793
795 down(&q->lock); 794 mutex_lock(&q->lock);
796 795
797 nbufs = 1; size = 0; 796 nbufs = 1; size = 0;
798 q->ops->buf_setup(q,&nbufs,&size); 797 q->ops->buf_setup(q,&nbufs,&size);
@@ -860,14 +859,14 @@ ssize_t videobuf_read_one(struct videobuf_queue *q,
860 } 859 }
861 860
862 done: 861 done:
863 up(&q->lock); 862 mutex_unlock(&q->lock);
864 return retval; 863 return retval;
865} 864}
866 865
867int videobuf_read_start(struct videobuf_queue *q) 866int videobuf_read_start(struct videobuf_queue *q)
868{ 867{
869 enum v4l2_field field; 868 enum v4l2_field field;
870 unsigned long flags; 869 unsigned long flags=0;
871 int count = 0, size = 0; 870 int count = 0, size = 0;
872 int err, i; 871 int err, i;
873 872
@@ -919,10 +918,10 @@ ssize_t videobuf_read_stream(struct videobuf_queue *q,
919{ 918{
920 unsigned int *fc, bytes; 919 unsigned int *fc, bytes;
921 int err, retval; 920 int err, retval;
922 unsigned long flags; 921 unsigned long flags=0;
923 922
924 dprintk(2,"%s\n",__FUNCTION__); 923 dprintk(2,"%s\n",__FUNCTION__);
925 down(&q->lock); 924 mutex_lock(&q->lock);
926 retval = -EBUSY; 925 retval = -EBUSY;
927 if (q->streaming) 926 if (q->streaming)
928 goto done; 927 goto done;
@@ -996,7 +995,7 @@ ssize_t videobuf_read_stream(struct videobuf_queue *q,
996 } 995 }
997 996
998 done: 997 done:
999 up(&q->lock); 998 mutex_unlock(&q->lock);
1000 return retval; 999 return retval;
1001} 1000}
1002 1001
@@ -1007,7 +1006,7 @@ unsigned int videobuf_poll_stream(struct file *file,
1007 struct videobuf_buffer *buf = NULL; 1006 struct videobuf_buffer *buf = NULL;
1008 unsigned int rc = 0; 1007 unsigned int rc = 0;
1009 1008
1010 down(&q->lock); 1009 mutex_lock(&q->lock);
1011 if (q->streaming) { 1010 if (q->streaming) {
1012 if (!list_empty(&q->stream)) 1011 if (!list_empty(&q->stream))
1013 buf = list_entry(q->stream.next, 1012 buf = list_entry(q->stream.next,
@@ -1035,7 +1034,7 @@ unsigned int videobuf_poll_stream(struct file *file,
1035 buf->state == STATE_ERROR) 1034 buf->state == STATE_ERROR)
1036 rc = POLLIN|POLLRDNORM; 1035 rc = POLLIN|POLLRDNORM;
1037 } 1036 }
1038 up(&q->lock); 1037 mutex_unlock(&q->lock);
1039 return rc; 1038 return rc;
1040} 1039}
1041 1040
@@ -1064,7 +1063,7 @@ videobuf_vm_close(struct vm_area_struct *vma)
1064 map->count--; 1063 map->count--;
1065 if (0 == map->count) { 1064 if (0 == map->count) {
1066 dprintk(1,"munmap %p q=%p\n",map,q); 1065 dprintk(1,"munmap %p q=%p\n",map,q);
1067 down(&q->lock); 1066 mutex_lock(&q->lock);
1068 for (i = 0; i < VIDEO_MAX_FRAME; i++) { 1067 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
1069 if (NULL == q->bufs[i]) 1068 if (NULL == q->bufs[i])
1070 continue; 1069 continue;
@@ -1076,7 +1075,7 @@ videobuf_vm_close(struct vm_area_struct *vma)
1076 q->bufs[i]->baddr = 0; 1075 q->bufs[i]->baddr = 0;
1077 q->ops->buf_release(q,q->bufs[i]); 1076 q->ops->buf_release(q,q->bufs[i]);
1078 } 1077 }
1079 up(&q->lock); 1078 mutex_unlock(&q->lock);
1080 kfree(map); 1079 kfree(map);
1081 } 1080 }
1082 return; 1081 return;
@@ -1170,7 +1169,7 @@ int videobuf_mmap_mapper(struct videobuf_queue *q,
1170 unsigned int first,last,size,i; 1169 unsigned int first,last,size,i;
1171 int retval; 1170 int retval;
1172 1171
1173 down(&q->lock); 1172 mutex_lock(&q->lock);
1174 retval = -EINVAL; 1173 retval = -EINVAL;
1175 if (!(vma->vm_flags & VM_WRITE)) { 1174 if (!(vma->vm_flags & VM_WRITE)) {
1176 dprintk(1,"mmap app bug: PROT_WRITE please\n"); 1175 dprintk(1,"mmap app bug: PROT_WRITE please\n");
@@ -1238,7 +1237,7 @@ int videobuf_mmap_mapper(struct videobuf_queue *q,
1238 retval = 0; 1237 retval = 0;
1239 1238
1240 done: 1239 done:
1241 up(&q->lock); 1240 mutex_unlock(&q->lock);
1242 return retval; 1241 return retval;
1243} 1242}
1244 1243
diff --git a/drivers/media/video/videodev.c b/drivers/media/video/videodev.c
index 078880e4c8c0..75e3d41382f2 100644
--- a/drivers/media/video/videodev.c
+++ b/drivers/media/video/videodev.c
@@ -224,13 +224,13 @@ int video_exclusive_open(struct inode *inode, struct file *file)
224 struct video_device *vfl = video_devdata(file); 224 struct video_device *vfl = video_devdata(file);
225 int retval = 0; 225 int retval = 0;
226 226
227 down(&vfl->lock); 227 mutex_lock(&vfl->lock);
228 if (vfl->users) { 228 if (vfl->users) {
229 retval = -EBUSY; 229 retval = -EBUSY;
230 } else { 230 } else {
231 vfl->users++; 231 vfl->users++;
232 } 232 }
233 up(&vfl->lock); 233 mutex_unlock(&vfl->lock);
234 return retval; 234 return retval;
235} 235}
236 236
@@ -279,23 +279,23 @@ int video_register_device(struct video_device *vfd, int type, int nr)
279 switch(type) 279 switch(type)
280 { 280 {
281 case VFL_TYPE_GRABBER: 281 case VFL_TYPE_GRABBER:
282 base=0; 282 base=MINOR_VFL_TYPE_GRABBER_MIN;
283 end=64; 283 end=MINOR_VFL_TYPE_GRABBER_MAX+1;
284 name_base = "video"; 284 name_base = "video";
285 break; 285 break;
286 case VFL_TYPE_VTX: 286 case VFL_TYPE_VTX:
287 base=192; 287 base=MINOR_VFL_TYPE_VTX_MIN;
288 end=224; 288 end=MINOR_VFL_TYPE_VTX_MAX+1;
289 name_base = "vtx"; 289 name_base = "vtx";
290 break; 290 break;
291 case VFL_TYPE_VBI: 291 case VFL_TYPE_VBI:
292 base=224; 292 base=MINOR_VFL_TYPE_VBI_MIN;
293 end=256; 293 end=MINOR_VFL_TYPE_VBI_MAX+1;
294 name_base = "vbi"; 294 name_base = "vbi";
295 break; 295 break;
296 case VFL_TYPE_RADIO: 296 case VFL_TYPE_RADIO:
297 base=64; 297 base=MINOR_VFL_TYPE_RADIO_MIN;
298 end=128; 298 end=MINOR_VFL_TYPE_RADIO_MAX+1;
299 name_base = "radio"; 299 name_base = "radio";
300 break; 300 break;
301 default: 301 default:
@@ -328,7 +328,7 @@ int video_register_device(struct video_device *vfd, int type, int nr)
328 sprintf(vfd->devfs_name, "v4l/%s%d", name_base, i - base); 328 sprintf(vfd->devfs_name, "v4l/%s%d", name_base, i - base);
329 devfs_mk_cdev(MKDEV(VIDEO_MAJOR, vfd->minor), 329 devfs_mk_cdev(MKDEV(VIDEO_MAJOR, vfd->minor),
330 S_IFCHR | S_IRUSR | S_IWUSR, vfd->devfs_name); 330 S_IFCHR | S_IRUSR | S_IWUSR, vfd->devfs_name);
331 init_MUTEX(&vfd->lock); 331 mutex_init(&vfd->lock);
332 332
333 /* sysfs class */ 333 /* sysfs class */
334 memset(&vfd->class_dev, 0x00, sizeof(vfd->class_dev)); 334 memset(&vfd->class_dev, 0x00, sizeof(vfd->class_dev));
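
video_register_device() above stops hard-coding the minor-number ranges for each device class and switches to named MINOR_VFL_TYPE_* constants. The values implied by the replaced literals are listed below; treat the exact header and spelling of the definitions as an assumption rather than a quotation:

/* Presumed definitions; values inferred from the replaced literals
 * (base .. end-1 becomes MIN .. MAX). */
#define MINOR_VFL_TYPE_GRABBER_MIN    0
#define MINOR_VFL_TYPE_GRABBER_MAX   63
#define MINOR_VFL_TYPE_RADIO_MIN     64
#define MINOR_VFL_TYPE_RADIO_MAX    127
#define MINOR_VFL_TYPE_VTX_MIN      192
#define MINOR_VFL_TYPE_VTX_MAX      223
#define MINOR_VFL_TYPE_VBI_MIN      224
#define MINOR_VFL_TYPE_VBI_MAX      255
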
diff --git a/drivers/media/video/vino.c b/drivers/media/video/vino.c
index c8fd8238904d..0229819d0aac 100644
--- a/drivers/media/video/vino.c
+++ b/drivers/media/video/vino.c
@@ -42,6 +42,7 @@
42#include <linux/videodev.h> 42#include <linux/videodev.h>
43#include <linux/videodev2.h> 43#include <linux/videodev2.h>
44#include <linux/video_decoder.h> 44#include <linux/video_decoder.h>
45#include <linux/mutex.h>
45 46
46#include <asm/paccess.h> 47#include <asm/paccess.h>
47#include <asm/io.h> 48#include <asm/io.h>
@@ -245,7 +246,7 @@ struct vino_framebuffer_queue {
245 struct vino_framebuffer *buffer[VINO_FRAMEBUFFER_COUNT_MAX]; 246 struct vino_framebuffer *buffer[VINO_FRAMEBUFFER_COUNT_MAX];
246 247
247 spinlock_t queue_lock; 248 spinlock_t queue_lock;
248 struct semaphore queue_sem; 249 struct mutex queue_mutex;
249 wait_queue_head_t frame_wait_queue; 250 wait_queue_head_t frame_wait_queue;
250}; 251};
251 252
@@ -283,7 +284,7 @@ struct vino_channel_settings {
283 /* the driver is currently processing the queue */ 284 /* the driver is currently processing the queue */
284 int capturing; 285 int capturing;
285 286
286 struct semaphore sem; 287 struct mutex mutex;
287 spinlock_t capture_lock; 288 spinlock_t capture_lock;
288 289
289 unsigned int users; 290 unsigned int users;
@@ -1131,11 +1132,11 @@ static void vino_queue_free(struct vino_framebuffer_queue *q)
1131 if (q->type != VINO_MEMORY_MMAP) 1132 if (q->type != VINO_MEMORY_MMAP)
1132 return; 1133 return;
1133 1134
1134 down(&q->queue_sem); 1135 mutex_lock(&q->queue_mutex);
1135 1136
1136 vino_queue_free_with_count(q, q->length); 1137 vino_queue_free_with_count(q, q->length);
1137 1138
1138 up(&q->queue_sem); 1139 mutex_unlock(&q->queue_mutex);
1139} 1140}
1140 1141
1141static int vino_queue_init(struct vino_framebuffer_queue *q, 1142static int vino_queue_init(struct vino_framebuffer_queue *q,
@@ -1159,7 +1160,7 @@ static int vino_queue_init(struct vino_framebuffer_queue *q,
1159 if (*length < 1) 1160 if (*length < 1)
1160 return -EINVAL; 1161 return -EINVAL;
1161 1162
1162 down(&q->queue_sem); 1163 mutex_lock(&q->queue_mutex);
1163 1164
1164 if (*length > VINO_FRAMEBUFFER_COUNT_MAX) 1165 if (*length > VINO_FRAMEBUFFER_COUNT_MAX)
1165 *length = VINO_FRAMEBUFFER_COUNT_MAX; 1166 *length = VINO_FRAMEBUFFER_COUNT_MAX;
@@ -1211,7 +1212,7 @@ static int vino_queue_init(struct vino_framebuffer_queue *q,
1211 q->magic = VINO_QUEUE_MAGIC; 1212 q->magic = VINO_QUEUE_MAGIC;
1212 } 1213 }
1213 1214
1214 up(&q->queue_sem); 1215 mutex_unlock(&q->queue_mutex);
1215 1216
1216 return ret; 1217 return ret;
1217} 1218}
@@ -4045,7 +4046,7 @@ static int vino_open(struct inode *inode, struct file *file)
4045 dprintk("open(): channel = %c\n", 4046 dprintk("open(): channel = %c\n",
4046 (vcs->channel == VINO_CHANNEL_A) ? 'A' : 'B'); 4047 (vcs->channel == VINO_CHANNEL_A) ? 'A' : 'B');
4047 4048
4048 down(&vcs->sem); 4049 mutex_lock(&vcs->mutex);
4049 4050
4050 if (vcs->users) { 4051 if (vcs->users) {
4051 dprintk("open(): driver busy\n"); 4052 dprintk("open(): driver busy\n");
@@ -4062,7 +4063,7 @@ static int vino_open(struct inode *inode, struct file *file)
4062 vcs->users++; 4063 vcs->users++;
4063 4064
4064 out: 4065 out:
4065 up(&vcs->sem); 4066 mutex_unlock(&vcs->mutex);
4066 4067
4067 dprintk("open(): %s!\n", ret ? "failed" : "complete"); 4068 dprintk("open(): %s!\n", ret ? "failed" : "complete");
4068 4069
@@ -4075,7 +4076,7 @@ static int vino_close(struct inode *inode, struct file *file)
4075 struct vino_channel_settings *vcs = video_get_drvdata(dev); 4076 struct vino_channel_settings *vcs = video_get_drvdata(dev);
4076 dprintk("close():\n"); 4077 dprintk("close():\n");
4077 4078
4078 down(&vcs->sem); 4079 mutex_lock(&vcs->mutex);
4079 4080
4080 vcs->users--; 4081 vcs->users--;
4081 4082
@@ -4087,7 +4088,7 @@ static int vino_close(struct inode *inode, struct file *file)
4087 vino_queue_free(&vcs->fb_queue); 4088 vino_queue_free(&vcs->fb_queue);
4088 } 4089 }
4089 4090
4090 up(&vcs->sem); 4091 mutex_unlock(&vcs->mutex);
4091 4092
4092 return 0; 4093 return 0;
4093} 4094}
@@ -4130,7 +4131,7 @@ static int vino_mmap(struct file *file, struct vm_area_struct *vma)
4130 4131
4131 // TODO: reject mmap if already mapped 4132 // TODO: reject mmap if already mapped
4132 4133
4133 if (down_interruptible(&vcs->sem)) 4134 if (mutex_lock_interruptible(&vcs->mutex))
4134 return -EINTR; 4135 return -EINTR;
4135 4136
4136 if (vcs->reading) { 4137 if (vcs->reading) {
@@ -4214,7 +4215,7 @@ found:
4214 vma->vm_ops = &vino_vm_ops; 4215 vma->vm_ops = &vino_vm_ops;
4215 4216
4216out: 4217out:
4217 up(&vcs->sem); 4218 mutex_unlock(&vcs->mutex);
4218 4219
4219 return ret; 4220 return ret;
4220} 4221}
@@ -4374,12 +4375,12 @@ static int vino_ioctl(struct inode *inode, struct file *file,
4374 struct vino_channel_settings *vcs = video_get_drvdata(dev); 4375 struct vino_channel_settings *vcs = video_get_drvdata(dev);
4375 int ret; 4376 int ret;
4376 4377
4377 if (down_interruptible(&vcs->sem)) 4378 if (mutex_lock_interruptible(&vcs->mutex))
4378 return -EINTR; 4379 return -EINTR;
4379 4380
4380 ret = video_usercopy(inode, file, cmd, arg, vino_do_ioctl); 4381 ret = video_usercopy(inode, file, cmd, arg, vino_do_ioctl);
4381 4382
4382 up(&vcs->sem); 4383 mutex_unlock(&vcs->mutex);
4383 4384
4384 return ret; 4385 return ret;
4385} 4386}
@@ -4564,10 +4565,10 @@ static int vino_init_channel_settings(struct vino_channel_settings *vcs,
4564 4565
4565 vcs->capturing = 0; 4566 vcs->capturing = 0;
4566 4567
4567 init_MUTEX(&vcs->sem); 4568 mutex_init(&vcs->mutex);
4568 spin_lock_init(&vcs->capture_lock); 4569 spin_lock_init(&vcs->capture_lock);
4569 4570
4570 init_MUTEX(&vcs->fb_queue.queue_sem); 4571 mutex_init(&vcs->fb_queue.queue_mutex);
4571 spin_lock_init(&vcs->fb_queue.queue_lock); 4572 spin_lock_init(&vcs->fb_queue.queue_lock);
4572 init_waitqueue_head(&vcs->fb_queue.frame_wait_queue); 4573 init_waitqueue_head(&vcs->fb_queue.frame_wait_queue);
4573 4574
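
vino follows the same semaphore-to-mutex conversion, but its open/mmap/ioctl paths used down_interruptible(), so they now use mutex_lock_interruptible(), which fails instead of blocking indefinitely when the task receives a signal. A sketch of that variant (names illustrative; the driver returns -EINTR on failure, as the hunks above show):

#include <linux/mutex.h>
#include <linux/errno.h>

static int demo_ioctl_locked(struct mutex *m)
{
        /* was: if (down_interruptible(&sem)) return -EINTR; */
        if (mutex_lock_interruptible(m))
                return -EINTR;

        /* ... do the work while holding the lock ... */

        mutex_unlock(m);
        return 0;
}
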
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 830528dce0ca..dc845f36fe49 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -100,6 +100,10 @@ static int max_interrupt_work = 10;
100static char versionA[] __initdata = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n"; 100static char versionA[] __initdata = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n";
101static char versionB[] __initdata = "http://www.scyld.com/network/3c509.html\n"; 101static char versionB[] __initdata = "http://www.scyld.com/network/3c509.html\n";
102 102
103#if defined(CONFIG_PM) && (defined(CONFIG_MCA) || defined(CONFIG_EISA))
104#define EL3_SUSPEND
105#endif
106
103#ifdef EL3_DEBUG 107#ifdef EL3_DEBUG
104static int el3_debug = EL3_DEBUG; 108static int el3_debug = EL3_DEBUG;
105#else 109#else
@@ -174,9 +178,6 @@ struct el3_private {
174 /* skb send-queue */ 178 /* skb send-queue */
175 int head, size; 179 int head, size;
176 struct sk_buff *queue[SKB_QUEUE_SIZE]; 180 struct sk_buff *queue[SKB_QUEUE_SIZE];
177#ifdef CONFIG_PM_LEGACY
178 struct pm_dev *pmdev;
179#endif
180 enum { 181 enum {
181 EL3_MCA, 182 EL3_MCA,
182 EL3_PNP, 183 EL3_PNP,
@@ -201,11 +202,15 @@ static void el3_tx_timeout (struct net_device *dev);
201static void el3_down(struct net_device *dev); 202static void el3_down(struct net_device *dev);
202static void el3_up(struct net_device *dev); 203static void el3_up(struct net_device *dev);
203static struct ethtool_ops ethtool_ops; 204static struct ethtool_ops ethtool_ops;
204#ifdef CONFIG_PM_LEGACY 205#ifdef EL3_SUSPEND
205static int el3_suspend(struct pm_dev *pdev); 206static int el3_suspend(struct device *, pm_message_t);
206static int el3_resume(struct pm_dev *pdev); 207static int el3_resume(struct device *);
207static int el3_pm_callback(struct pm_dev *pdev, pm_request_t rqst, void *data); 208#else
209#define el3_suspend NULL
210#define el3_resume NULL
208#endif 211#endif
212
213
209/* generic device remove for all device types */ 214/* generic device remove for all device types */
210#if defined(CONFIG_EISA) || defined(CONFIG_MCA) 215#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
211static int el3_device_remove (struct device *device); 216static int el3_device_remove (struct device *device);
@@ -229,7 +234,9 @@ static struct eisa_driver el3_eisa_driver = {
229 .driver = { 234 .driver = {
230 .name = "3c509", 235 .name = "3c509",
231 .probe = el3_eisa_probe, 236 .probe = el3_eisa_probe,
232 .remove = __devexit_p (el3_device_remove) 237 .remove = __devexit_p (el3_device_remove),
238 .suspend = el3_suspend,
239 .resume = el3_resume,
233 } 240 }
234}; 241};
235#endif 242#endif
@@ -262,6 +269,8 @@ static struct mca_driver el3_mca_driver = {
262 .bus = &mca_bus_type, 269 .bus = &mca_bus_type,
263 .probe = el3_mca_probe, 270 .probe = el3_mca_probe,
264 .remove = __devexit_p(el3_device_remove), 271 .remove = __devexit_p(el3_device_remove),
272 .suspend = el3_suspend,
273 .resume = el3_resume,
265 }, 274 },
266}; 275};
267#endif /* CONFIG_MCA */ 276#endif /* CONFIG_MCA */
@@ -362,10 +371,6 @@ static void el3_common_remove (struct net_device *dev)
362 struct el3_private *lp = netdev_priv(dev); 371 struct el3_private *lp = netdev_priv(dev);
363 372
364 (void) lp; /* Keep gcc quiet... */ 373 (void) lp; /* Keep gcc quiet... */
365#ifdef CONFIG_PM_LEGACY
366 if (lp->pmdev)
367 pm_unregister(lp->pmdev);
368#endif
369#if defined(__ISAPNP__) 374#if defined(__ISAPNP__)
370 if (lp->type == EL3_PNP) 375 if (lp->type == EL3_PNP)
371 pnp_device_detach(to_pnp_dev(lp->dev)); 376 pnp_device_detach(to_pnp_dev(lp->dev));
@@ -572,16 +577,6 @@ no_pnp:
572 if (err) 577 if (err)
573 goto out1; 578 goto out1;
574 579
575#ifdef CONFIG_PM_LEGACY
576 /* register power management */
577 lp->pmdev = pm_register(PM_ISA_DEV, card_idx, el3_pm_callback);
578 if (lp->pmdev) {
579 struct pm_dev *p;
580 p = lp->pmdev;
581 p->data = (struct net_device *)dev;
582 }
583#endif
584
585 el3_cards++; 580 el3_cards++;
586 lp->next_dev = el3_root_dev; 581 lp->next_dev = el3_root_dev;
587 el3_root_dev = dev; 582 el3_root_dev = dev;
@@ -1480,20 +1475,17 @@ el3_up(struct net_device *dev)
1480} 1475}
1481 1476
1482/* Power Management support functions */ 1477/* Power Management support functions */
1483#ifdef CONFIG_PM_LEGACY 1478#ifdef EL3_SUSPEND
1484 1479
1485static int 1480static int
1486el3_suspend(struct pm_dev *pdev) 1481el3_suspend(struct device *pdev, pm_message_t state)
1487{ 1482{
1488 unsigned long flags; 1483 unsigned long flags;
1489 struct net_device *dev; 1484 struct net_device *dev;
1490 struct el3_private *lp; 1485 struct el3_private *lp;
1491 int ioaddr; 1486 int ioaddr;
1492 1487
1493 if (!pdev && !pdev->data) 1488 dev = pdev->driver_data;
1494 return -EINVAL;
1495
1496 dev = (struct net_device *)pdev->data;
1497 lp = netdev_priv(dev); 1489 lp = netdev_priv(dev);
1498 ioaddr = dev->base_addr; 1490 ioaddr = dev->base_addr;
1499 1491
@@ -1510,17 +1502,14 @@ el3_suspend(struct pm_dev *pdev)
1510} 1502}
1511 1503
1512static int 1504static int
1513el3_resume(struct pm_dev *pdev) 1505el3_resume(struct device *pdev)
1514{ 1506{
1515 unsigned long flags; 1507 unsigned long flags;
1516 struct net_device *dev; 1508 struct net_device *dev;
1517 struct el3_private *lp; 1509 struct el3_private *lp;
1518 int ioaddr; 1510 int ioaddr;
1519 1511
1520 if (!pdev && !pdev->data) 1512 dev = pdev->driver_data;
1521 return -EINVAL;
1522
1523 dev = (struct net_device *)pdev->data;
1524 lp = netdev_priv(dev); 1513 lp = netdev_priv(dev);
1525 ioaddr = dev->base_addr; 1514 ioaddr = dev->base_addr;
1526 1515
@@ -1536,20 +1525,7 @@ el3_resume(struct pm_dev *pdev)
1536 return 0; 1525 return 0;
1537} 1526}
1538 1527
1539static int 1528#endif /* EL3_SUSPEND */
1540el3_pm_callback(struct pm_dev *pdev, pm_request_t rqst, void *data)
1541{
1542 switch (rqst) {
1543 case PM_SUSPEND:
1544 return el3_suspend(pdev);
1545
1546 case PM_RESUME:
1547 return el3_resume(pdev);
1548 }
1549 return 0;
1550}
1551
1552#endif /* CONFIG_PM_LEGACY */
1553 1529
1554/* Parameters that may be passed into the module. */ 1530/* Parameters that may be passed into the module. */
1555static int debug = -1; 1531static int debug = -1;
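
3c509 drops the legacy pm_register()/pm_dev callback machinery and instead hooks suspend/resume into the EISA and MCA driver structures, so the hooks take a struct device * plus a pm_message_t and recover the net_device from dev->driver_data. A rough sketch of the new shape under a generic device_driver; this is an illustration only, and real registration also fills in the bus/probe/remove fields shown in the hunks above:

#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/pm.h>

static int demo_suspend(struct device *dev, pm_message_t state)
{
        struct net_device *ndev = dev->driver_data;

        netif_device_detach(ndev);
        /* ... mask interrupts, power the chip down ... */
        return 0;
}

static int demo_resume(struct device *dev)
{
        struct net_device *ndev = dev->driver_data;

        /* ... reinitialise the chip ... */
        netif_device_attach(ndev);
        return 0;
}

static struct device_driver demo_driver = {
        .name    = "demo",
        .suspend = demo_suspend,
        .resume  = demo_resume,
};
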
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index 9e1fe2e0478c..b40885d41680 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -105,6 +105,7 @@
105#include <linux/mca-legacy.h> 105#include <linux/mca-legacy.h>
106#include <linux/ethtool.h> 106#include <linux/ethtool.h>
107#include <linux/bitops.h> 107#include <linux/bitops.h>
108#include <linux/jiffies.h>
108 109
109#include <asm/uaccess.h> 110#include <asm/uaccess.h>
110#include <asm/processor.h> 111#include <asm/processor.h>
@@ -658,7 +659,7 @@ static int init586(struct net_device *dev)
658 659
659 s = jiffies; /* warning: only active with interrupts on !! */ 660 s = jiffies; /* warning: only active with interrupts on !! */
660 while (!(cfg_cmd->cmd_status & STAT_COMPL)) { 661 while (!(cfg_cmd->cmd_status & STAT_COMPL)) {
661 if (jiffies - s > 30*HZ/100) 662 if (time_after(jiffies, s + 30*HZ/100))
662 break; 663 break;
663 } 664 }
664 665
@@ -684,7 +685,7 @@ static int init586(struct net_device *dev)
684 685
685 s = jiffies; 686 s = jiffies;
686 while (!(ias_cmd->cmd_status & STAT_COMPL)) { 687 while (!(ias_cmd->cmd_status & STAT_COMPL)) {
687 if (jiffies - s > 30*HZ/100) 688 if (time_after(jiffies, s + 30*HZ/100))
688 break; 689 break;
689 } 690 }
690 691
@@ -709,7 +710,7 @@ static int init586(struct net_device *dev)
709 710
710 s = jiffies; 711 s = jiffies;
711 while (!(tdr_cmd->cmd_status & STAT_COMPL)) { 712 while (!(tdr_cmd->cmd_status & STAT_COMPL)) {
712 if (jiffies - s > 30*HZ/100) { 713 if (time_after(jiffies, s + 30*HZ/100)) {
713 printk(KERN_WARNING "%s: %d Problems while running the TDR.\n", dev->name, __LINE__); 714 printk(KERN_WARNING "%s: %d Problems while running the TDR.\n", dev->name, __LINE__);
714 result = 1; 715 result = 1;
715 break; 716 break;
@@ -798,7 +799,7 @@ static int init586(struct net_device *dev)
798 elmc_id_attn586(); 799 elmc_id_attn586();
799 s = jiffies; 800 s = jiffies;
800 while (!(mc_cmd->cmd_status & STAT_COMPL)) { 801 while (!(mc_cmd->cmd_status & STAT_COMPL)) {
801 if (jiffies - s > 30*HZ/100) 802 if (time_after(jiffies, s + 30*HZ/100))
802 break; 803 break;
803 } 804 }
804 if (!(mc_cmd->cmd_status & STAT_COMPL)) { 805 if (!(mc_cmd->cmd_status & STAT_COMPL)) {
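
3c523 (and several drivers below) replaces open-coded "jiffies - start > timeout" checks with time_after() from <linux/jiffies.h>, which performs the comparison through a signed subtraction so timeouts remain correct when the jiffies counter wraps. A wrap-safe busy-wait in that style, with the 0x8000 completion bit standing in for the driver's STAT_COMPL flag:

#include <linux/jiffies.h>
#include <linux/errno.h>

static int demo_wait_complete(volatile unsigned short *status)
{
        unsigned long start = jiffies;

        while (!(*status & 0x8000)) {                   /* stand-in for STAT_COMPL */
                if (time_after(jiffies, start + 30 * HZ / 100))
                        return -ETIMEDOUT;              /* give up after ~300 ms */
        }
        return 0;
}
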
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 7f47124f118d..5d11a06ecb2c 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -258,6 +258,7 @@ static int vortex_debug = 1;
258#include <linux/highmem.h> 258#include <linux/highmem.h>
259#include <linux/eisa.h> 259#include <linux/eisa.h>
260#include <linux/bitops.h> 260#include <linux/bitops.h>
261#include <linux/jiffies.h>
261#include <asm/irq.h> /* For NR_IRQS only. */ 262#include <asm/irq.h> /* For NR_IRQS only. */
262#include <asm/io.h> 263#include <asm/io.h>
263#include <asm/uaccess.h> 264#include <asm/uaccess.h>
@@ -841,7 +842,7 @@ enum xcvr_types {
841 XCVR_100baseFx, XCVR_MII=6, XCVR_NWAY=8, XCVR_ExtMII=9, XCVR_Default=10, 842 XCVR_100baseFx, XCVR_MII=6, XCVR_NWAY=8, XCVR_ExtMII=9, XCVR_Default=10,
842}; 843};
843 844
844static struct media_table { 845static const struct media_table {
845 char *name; 846 char *name;
846 unsigned int media_bits:16, /* Bits to set in Wn4_Media register. */ 847 unsigned int media_bits:16, /* Bits to set in Wn4_Media register. */
847 mask:8, /* The transceiver-present bit in Wn3_Config.*/ 848 mask:8, /* The transceiver-present bit in Wn3_Config.*/
@@ -1445,7 +1446,7 @@ static int __devinit vortex_probe1(struct device *gendev,
1445 } 1446 }
1446 1447
1447 { 1448 {
1448 static const char * ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; 1449 static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
1449 unsigned int config; 1450 unsigned int config;
1450 EL3WINDOW(3); 1451 EL3WINDOW(3);
1451 vp->available_media = ioread16(ioaddr + Wn3_Options); 1452 vp->available_media = ioread16(ioaddr + Wn3_Options);
@@ -2724,7 +2725,7 @@ boomerang_rx(struct net_device *dev)
2724 skb = dev_alloc_skb(PKT_BUF_SZ); 2725 skb = dev_alloc_skb(PKT_BUF_SZ);
2725 if (skb == NULL) { 2726 if (skb == NULL) {
2726 static unsigned long last_jif; 2727 static unsigned long last_jif;
2727 if ((jiffies - last_jif) > 10 * HZ) { 2728 if (time_after(jiffies, last_jif + 10 * HZ)) {
2728 printk(KERN_WARNING "%s: memory shortage\n", dev->name); 2729 printk(KERN_WARNING "%s: memory shortage\n", dev->name);
2729 last_jif = jiffies; 2730 last_jif = jiffies;
2730 } 2731 }
diff --git a/drivers/net/7990.c b/drivers/net/7990.c
index 18b027e73f28..86633c5f1a4b 100644
--- a/drivers/net/7990.c
+++ b/drivers/net/7990.c
@@ -29,7 +29,7 @@
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/string.h> 30#include <linux/string.h>
31#include <linux/skbuff.h> 31#include <linux/skbuff.h>
32#include <linux/irq.h> 32#include <asm/irq.h>
33/* Used for the temporal inet entries and routing */ 33/* Used for the temporal inet entries and routing */
34#include <linux/socket.h> 34#include <linux/socket.h>
35#include <linux/bitops.h> 35#include <linux/bitops.h>
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index dd410496aadb..ce99845d8266 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -1276,7 +1276,7 @@ static int cp_change_mtu(struct net_device *dev, int new_mtu)
1276} 1276}
1277#endif /* BROKEN */ 1277#endif /* BROKEN */
1278 1278
1279static char mii_2_8139_map[8] = { 1279static const char mii_2_8139_map[8] = {
1280 BasicModeCtrl, 1280 BasicModeCtrl,
1281 BasicModeStatus, 1281 BasicModeStatus,
1282 0, 1282 0,
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 2beac55b57d6..e58d4c50c2e1 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -229,7 +229,7 @@ typedef enum {
229 229
230 230
231/* indexed by board_t, above */ 231/* indexed by board_t, above */
232static struct { 232static const struct {
233 const char *name; 233 const char *name;
234 u32 hw_flags; 234 u32 hw_flags;
235} board_info[] __devinitdata = { 235} board_info[] __devinitdata = {
@@ -1192,7 +1192,7 @@ static int __devinit read_eeprom (void __iomem *ioaddr, int location, int addr_l
1192#define mdio_delay() RTL_R8(Config4) 1192#define mdio_delay() RTL_R8(Config4)
1193 1193
1194 1194
1195static char mii_2_8139_map[8] = { 1195static const char mii_2_8139_map[8] = {
1196 BasicModeCtrl, 1196 BasicModeCtrl,
1197 BasicModeStatus, 1197 BasicModeStatus,
1198 0, 1198 0,
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index 13b745b39667..da0c878dcba8 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -614,7 +614,7 @@ static void rebuild_rx_bufs(struct net_device *dev)
614static int init_i596_mem(struct net_device *dev) 614static int init_i596_mem(struct net_device *dev)
615{ 615{
616 struct i596_private *lp = dev->priv; 616 struct i596_private *lp = dev->priv;
617#if !defined(ENABLE_MVME16x_NET) && !defined(ENABLE_BVME6000_NET) 617#if !defined(ENABLE_MVME16x_NET) && !defined(ENABLE_BVME6000_NET) || defined(ENABLE_APRICOT)
618 short ioaddr = dev->base_addr; 618 short ioaddr = dev->base_addr;
619#endif 619#endif
620 unsigned long flags; 620 unsigned long flags;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index aa633fa95e64..e0b11095b9da 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -66,7 +66,7 @@ config BONDING
66 'Trunking' by Sun, 802.3ad by the IEEE, and 'Bonding' in Linux. 66 'Trunking' by Sun, 802.3ad by the IEEE, and 'Bonding' in Linux.
67 67
68 The driver supports multiple bonding modes to allow for both high 68 The driver supports multiple bonding modes to allow for both high
69 perfomance and high availability operation. 69 performance and high availability operation.
70 70
71 Refer to <file:Documentation/networking/bonding.txt> for more 71 Refer to <file:Documentation/networking/bonding.txt> for more
72 information. 72 information.
@@ -698,8 +698,8 @@ config VORTEX
698 depends on NET_VENDOR_3COM && (PCI || EISA) 698 depends on NET_VENDOR_3COM && (PCI || EISA)
699 select MII 699 select MII
700 ---help--- 700 ---help---
701 This option enables driver support for a large number of 10mbps and 701 This option enables driver support for a large number of 10Mbps and
702 10/100mbps EISA, PCI and PCMCIA 3Com network cards: 702 10/100Mbps EISA, PCI and PCMCIA 3Com network cards:
703 703
704 "Vortex" (Fast EtherLink 3c590/3c592/3c595/3c597) EISA and PCI 704 "Vortex" (Fast EtherLink 3c590/3c592/3c595/3c597) EISA and PCI
705 "Boomerang" (EtherLink XL 3c900 or 3c905) PCI 705 "Boomerang" (EtherLink XL 3c900 or 3c905) PCI
@@ -1021,7 +1021,7 @@ config EEXPRESS_PRO
1021 depends on NET_ISA 1021 depends on NET_ISA
1022 ---help--- 1022 ---help---
1023 If you have a network (Ethernet) card of this type, say Y. This 1023 If you have a network (Ethernet) card of this type, say Y. This
1024 driver supports intel i82595{FX,TX} based boards. Note however 1024 driver supports Intel i82595{FX,TX} based boards. Note however
1025 that the EtherExpress PRO/100 Ethernet card has its own separate 1025 that the EtherExpress PRO/100 Ethernet card has its own separate
1026 driver. Please read the Ethernet-HOWTO, available from 1026 driver. Please read the Ethernet-HOWTO, available from
1027 <http://www.tldp.org/docs.html#howto>. 1027 <http://www.tldp.org/docs.html#howto>.
@@ -1208,7 +1208,7 @@ config IBM_EMAC_RX_SKB_HEADROOM
1208 help 1208 help
1209 Additional receive skb headroom. Note, that driver 1209 Additional receive skb headroom. Note, that driver
1210 will always reserve at least 2 bytes to make IP header 1210 will always reserve at least 2 bytes to make IP header
1211 aligned, so usualy there is no need to add any additional 1211 aligned, so usually there is no need to add any additional
1212 headroom. 1212 headroom.
1213 1213
1214 If unsure, set to 0. 1214 If unsure, set to 0.
@@ -1372,8 +1372,8 @@ config B44
1372 called b44. 1372 called b44.
1373 1373
1374config FORCEDETH 1374config FORCEDETH
1375 tristate "Reverse Engineered nForce Ethernet support (EXPERIMENTAL)" 1375 tristate "nForce Ethernet support"
1376 depends on NET_PCI && PCI && EXPERIMENTAL 1376 depends on NET_PCI && PCI
1377 help 1377 help
1378 If you have a network (Ethernet) controller of this type, say Y and 1378 If you have a network (Ethernet) controller of this type, say Y and
1379 read the Ethernet-HOWTO, available from 1379 read the Ethernet-HOWTO, available from
@@ -1614,11 +1614,7 @@ config SIS900
1614 ---help--- 1614 ---help---
1615 This is a driver for the Fast Ethernet PCI network cards based on 1615 This is a driver for the Fast Ethernet PCI network cards based on
1616 the SiS 900 and SiS 7016 chips. The SiS 900 core is also embedded in 1616 the SiS 900 and SiS 7016 chips. The SiS 900 core is also embedded in
1617 SiS 630 and SiS 540 chipsets. If you have one of those, say Y and 1617 SiS 630 and SiS 540 chipsets.
1618 read the Ethernet-HOWTO, available at
1619 <http://www.tldp.org/docs.html#howto>. Please read
1620 <file:Documentation/networking/sis900.txt> and comments at the
1621 beginning of <file:drivers/net/sis900.c> for more information.
1622 1618
1623 This driver also supports AMD 79C901 HomePNA so that you can use 1619 This driver also supports AMD 79C901 HomePNA so that you can use
1624 your phone line as a network cable. 1620 your phone line as a network cable.
@@ -1934,7 +1930,7 @@ config MYRI_SBUS
1934 will be called myri_sbus. This is recommended. 1930 will be called myri_sbus. This is recommended.
1935 1931
1936config NS83820 1932config NS83820
1937 tristate "National Semiconduct DP83820 support" 1933 tristate "National Semiconductor DP83820 support"
1938 depends on PCI 1934 depends on PCI
1939 help 1935 help
1940 This is a driver for the National Semiconductor DP83820 series 1936 This is a driver for the National Semiconductor DP83820 series
@@ -2195,6 +2191,7 @@ config GFAR_NAPI
2195config MV643XX_ETH 2191config MV643XX_ETH
2196 tristate "MV-643XX Ethernet support" 2192 tristate "MV-643XX Ethernet support"
2197 depends on MOMENCO_OCELOT_C || MOMENCO_JAGUAR_ATX || MV64360 || MOMENCO_OCELOT_3 || PPC_MULTIPLATFORM 2193 depends on MOMENCO_OCELOT_C || MOMENCO_JAGUAR_ATX || MV64360 || MOMENCO_OCELOT_3 || PPC_MULTIPLATFORM
2194 select MII
2198 help 2195 help
2199 This driver supports the gigabit Ethernet on the Marvell MV643XX 2196 This driver supports the gigabit Ethernet on the Marvell MV643XX
2200 chipset which is used in the Momenco Ocelot C and Jaguar ATX and 2197 chipset which is used in the Momenco Ocelot C and Jaguar ATX and
@@ -2514,7 +2511,7 @@ config PPP_FILTER
2514 Say Y here if you want to be able to filter the packets passing over 2511 Say Y here if you want to be able to filter the packets passing over
2515 PPP interfaces. This allows you to control which packets count as 2512 PPP interfaces. This allows you to control which packets count as
2516 activity (i.e. which packets will reset the idle timer or bring up 2513 activity (i.e. which packets will reset the idle timer or bring up
2517 a demand-dialled link) and which packets are to be dropped entirely. 2514 a demand-dialed link) and which packets are to be dropped entirely.
2518 You need to say Y here if you wish to use the pass-filter and 2515 You need to say Y here if you wish to use the pass-filter and
2519 active-filter options to pppd. 2516 active-filter options to pppd.
2520 2517
@@ -2702,8 +2699,8 @@ config SHAPER
2702 <file:Documentation/networking/shaper.txt> for more information. 2699 <file:Documentation/networking/shaper.txt> for more information.
2703 2700
2704 An alternative to this traffic shaper is the experimental 2701 An alternative to this traffic shaper is the experimental
2705 Class-Based Queueing (CBQ) scheduling support which you get if you 2702 Class-Based Queuing (CBQ) scheduling support which you get if you
2706 say Y to "QoS and/or fair queueing" above. 2703 say Y to "QoS and/or fair queuing" above.
2707 2704
2708 To compile this driver as a module, choose M here: the module 2705 To compile this driver as a module, choose M here: the module
2709 will be called shaper. If unsure, say N. 2706 will be called shaper. If unsure, say N.
diff --git a/drivers/net/apne.c b/drivers/net/apne.c
index a94216b87184..b9820b86cdcc 100644
--- a/drivers/net/apne.c
+++ b/drivers/net/apne.c
@@ -36,6 +36,7 @@
36#include <linux/delay.h> 36#include <linux/delay.h>
37#include <linux/netdevice.h> 37#include <linux/netdevice.h>
38#include <linux/etherdevice.h> 38#include <linux/etherdevice.h>
39#include <linux/jiffies.h>
39 40
40#include <asm/system.h> 41#include <asm/system.h>
41#include <asm/io.h> 42#include <asm/io.h>
@@ -216,7 +217,7 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
216 outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET); 217 outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET);
217 218
218 while ((inb(ioaddr + NE_EN0_ISR) & ENISR_RESET) == 0) 219 while ((inb(ioaddr + NE_EN0_ISR) & ENISR_RESET) == 0)
219 if (jiffies - reset_start_time > 2*HZ/100) { 220 if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
220 printk(" not found (no reset ack).\n"); 221 printk(" not found (no reset ack).\n");
221 return -ENODEV; 222 return -ENODEV;
222 } 223 }
@@ -382,7 +383,7 @@ apne_reset_8390(struct net_device *dev)
382 383
383 /* This check _should_not_ be necessary, omit eventually. */ 384 /* This check _should_not_ be necessary, omit eventually. */
384 while ((inb(NE_BASE+NE_EN0_ISR) & ENISR_RESET) == 0) 385 while ((inb(NE_BASE+NE_EN0_ISR) & ENISR_RESET) == 0)
385 if (jiffies - reset_start_time > 2*HZ/100) { 386 if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
386 printk("%s: ne_reset_8390() did not complete.\n", dev->name); 387 printk("%s: ne_reset_8390() did not complete.\n", dev->name);
387 break; 388 break;
388 } 389 }
@@ -530,7 +531,7 @@ apne_block_output(struct net_device *dev, int count,
530 dma_start = jiffies; 531 dma_start = jiffies;
531 532
532 while ((inb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0) 533 while ((inb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0)
533 if (jiffies - dma_start > 2*HZ/100) { /* 20ms */ 534 if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */
534 printk("%s: timeout waiting for Tx RDC.\n", dev->name); 535 printk("%s: timeout waiting for Tx RDC.\n", dev->name);
535 apne_reset_8390(dev); 536 apne_reset_8390(dev);
536 NS8390_init(dev,1); 537 NS8390_init(dev,1);
diff --git a/drivers/net/arcnet/Kconfig b/drivers/net/arcnet/Kconfig
index 948de2532a1e..7284ccad0b91 100644
--- a/drivers/net/arcnet/Kconfig
+++ b/drivers/net/arcnet/Kconfig
@@ -68,10 +68,10 @@ config ARCNET_CAP
68 packet is stuffed with an extra 4 byte "cookie" which doesn't 68 packet is stuffed with an extra 4 byte "cookie" which doesn't
69 actually appear on the network. After transmit the driver will send 69 actually appear on the network. After transmit the driver will send
70 back a packet with protocol byte 0 containing the status of the 70 back a packet with protocol byte 0 containing the status of the
71 transmition: 71 transmission:
72 0=no hardware acknowledge 72 0=no hardware acknowledge
73 1=excessive nak 73 1=excessive nak
74 2=transmition accepted by the reciever hardware 74 2=transmission accepted by the receiver hardware
75 75
76 Received packets are also stuffed with the extra 4 bytes but it will 76 Received packets are also stuffed with the extra 4 bytes but it will
77 be random data. 77 be random data.
diff --git a/drivers/net/arcnet/arc-rawmode.c b/drivers/net/arcnet/arc-rawmode.c
index e1ea29b0cd14..e7555d4e6ff1 100644
--- a/drivers/net/arcnet/arc-rawmode.c
+++ b/drivers/net/arcnet/arc-rawmode.c
@@ -42,7 +42,7 @@ static int build_header(struct sk_buff *skb, struct net_device *dev,
42static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length, 42static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
43 int bufnum); 43 int bufnum);
44 44
45struct ArcProto rawmode_proto = 45static struct ArcProto rawmode_proto =
46{ 46{
47 .suffix = 'r', 47 .suffix = 'r',
48 .mtu = XMTU, 48 .mtu = XMTU,
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c
index 38c3f033f739..8c8d6c453c45 100644
--- a/drivers/net/arcnet/arc-rimi.c
+++ b/drivers/net/arcnet/arc-rimi.c
@@ -97,25 +97,44 @@ static int __init arcrimi_probe(struct net_device *dev)
97 "must specify the shmem and irq!\n"); 97 "must specify the shmem and irq!\n");
98 return -ENODEV; 98 return -ENODEV;
99 } 99 }
100 if (dev->dev_addr[0] == 0) {
101 BUGMSG(D_NORMAL, "You need to specify your card's station "
102 "ID!\n");
103 return -ENODEV;
104 }
100 /* 105 /*
101 * Grab the memory region at mem_start for BUFFER_SIZE bytes. 106 * Grab the memory region at mem_start for MIRROR_SIZE bytes.
102 * Later in arcrimi_found() the real size will be determined 107 * Later in arcrimi_found() the real size will be determined
103 * and this reserve will be released and the correct size 108 * and this reserve will be released and the correct size
104 * will be taken. 109 * will be taken.
105 */ 110 */
106 if (!request_mem_region(dev->mem_start, BUFFER_SIZE, "arcnet (90xx)")) { 111 if (!request_mem_region(dev->mem_start, MIRROR_SIZE, "arcnet (90xx)")) {
107 BUGMSG(D_NORMAL, "Card memory already allocated\n"); 112 BUGMSG(D_NORMAL, "Card memory already allocated\n");
108 return -ENODEV; 113 return -ENODEV;
109 } 114 }
110 if (dev->dev_addr[0] == 0) {
111 release_mem_region(dev->mem_start, BUFFER_SIZE);
112 BUGMSG(D_NORMAL, "You need to specify your card's station "
113 "ID!\n");
114 return -ENODEV;
115 }
116 return arcrimi_found(dev); 115 return arcrimi_found(dev);
117} 116}
118 117
118static int check_mirror(unsigned long addr, size_t size)
119{
120 void __iomem *p;
121 int res = -1;
122
123 if (!request_mem_region(addr, size, "arcnet (90xx)"))
124 return -1;
125
126 p = ioremap(addr, size);
127 if (p) {
128 if (readb(p) == TESTvalue)
129 res = 1;
130 else
131 res = 0;
132 iounmap(p);
133 }
134
135 release_mem_region(addr, size);
136 return res;
137}
119 138
120/* 139/*
121 * Set up the struct net_device associated with this card. Called after 140 * Set up the struct net_device associated with this card. Called after
@@ -125,19 +144,28 @@ static int __init arcrimi_found(struct net_device *dev)
125{ 144{
126 struct arcnet_local *lp; 145 struct arcnet_local *lp;
127 unsigned long first_mirror, last_mirror, shmem; 146 unsigned long first_mirror, last_mirror, shmem;
147 void __iomem *p;
128 int mirror_size; 148 int mirror_size;
129 int err; 149 int err;
130 150
151 p = ioremap(dev->mem_start, MIRROR_SIZE);
152 if (!p) {
153 release_mem_region(dev->mem_start, MIRROR_SIZE);
154 BUGMSG(D_NORMAL, "Can't ioremap\n");
155 return -ENODEV;
156 }
157
131 /* reserve the irq */ 158 /* reserve the irq */
132 if (request_irq(dev->irq, &arcnet_interrupt, 0, "arcnet (RIM I)", dev)) { 159 if (request_irq(dev->irq, &arcnet_interrupt, 0, "arcnet (RIM I)", dev)) {
133 release_mem_region(dev->mem_start, BUFFER_SIZE); 160 iounmap(p);
161 release_mem_region(dev->mem_start, MIRROR_SIZE);
134 BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq); 162 BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq);
135 return -ENODEV; 163 return -ENODEV;
136 } 164 }
137 165
138 shmem = dev->mem_start; 166 shmem = dev->mem_start;
139 isa_writeb(TESTvalue, shmem); 167 writeb(TESTvalue, p);
140 isa_writeb(dev->dev_addr[0], shmem + 1); /* actually the node ID */ 168 writeb(dev->dev_addr[0], p + 1); /* actually the node ID */
141 169
142 /* find the real shared memory start/end points, including mirrors */ 170 /* find the real shared memory start/end points, including mirrors */
143 171
@@ -146,17 +174,18 @@ static int __init arcrimi_found(struct net_device *dev)
146 * 2k (or there are no mirrors at all) but on some, it's 4k. 174 * 2k (or there are no mirrors at all) but on some, it's 4k.
147 */ 175 */
148 mirror_size = MIRROR_SIZE; 176 mirror_size = MIRROR_SIZE;
149 if (isa_readb(shmem) == TESTvalue 177 if (readb(p) == TESTvalue
150 && isa_readb(shmem - mirror_size) != TESTvalue 178 && check_mirror(shmem - MIRROR_SIZE, MIRROR_SIZE) == 0
151 && isa_readb(shmem - 2 * mirror_size) == TESTvalue) 179 && check_mirror(shmem - 2 * MIRROR_SIZE, MIRROR_SIZE) == 1)
152 mirror_size *= 2; 180 mirror_size = 2 * MIRROR_SIZE;
153 181
154 first_mirror = last_mirror = shmem; 182 first_mirror = shmem - mirror_size;
155 while (isa_readb(first_mirror) == TESTvalue) 183 while (check_mirror(first_mirror, mirror_size) == 1)
156 first_mirror -= mirror_size; 184 first_mirror -= mirror_size;
157 first_mirror += mirror_size; 185 first_mirror += mirror_size;
158 186
159 while (isa_readb(last_mirror) == TESTvalue) 187 last_mirror = shmem + mirror_size;
188 while (check_mirror(last_mirror, mirror_size) == 1)
160 last_mirror += mirror_size; 189 last_mirror += mirror_size;
161 last_mirror -= mirror_size; 190 last_mirror -= mirror_size;
162 191
@@ -181,7 +210,8 @@ static int __init arcrimi_found(struct net_device *dev)
181 * with the correct size. There is a VERY slim chance this could 210 * with the correct size. There is a VERY slim chance this could
182 * fail. 211 * fail.
183 */ 212 */
184 release_mem_region(shmem, BUFFER_SIZE); 213 iounmap(p);
214 release_mem_region(shmem, MIRROR_SIZE);
185 if (!request_mem_region(dev->mem_start, 215 if (!request_mem_region(dev->mem_start,
186 dev->mem_end - dev->mem_start + 1, 216 dev->mem_end - dev->mem_start + 1,
187 "arcnet (90xx)")) { 217 "arcnet (90xx)")) {
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index 12ef52c193a3..64e2caf3083d 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -52,6 +52,7 @@
52#include <net/arp.h> 52#include <net/arp.h>
53#include <linux/init.h> 53#include <linux/init.h>
54#include <linux/arcdevice.h> 54#include <linux/arcdevice.h>
55#include <linux/jiffies.h>
55 56
56/* "do nothing" functions for protocol drivers */ 57/* "do nothing" functions for protocol drivers */
57static void null_rx(struct net_device *dev, int bufnum, 58static void null_rx(struct net_device *dev, int bufnum,
@@ -61,6 +62,7 @@ static int null_build_header(struct sk_buff *skb, struct net_device *dev,
61static int null_prepare_tx(struct net_device *dev, struct archdr *pkt, 62static int null_prepare_tx(struct net_device *dev, struct archdr *pkt,
62 int length, int bufnum); 63 int length, int bufnum);
63 64
65static void arcnet_rx(struct net_device *dev, int bufnum);
64 66
65/* 67/*
66 * one ArcProto per possible proto ID. None of the elements of 68 * one ArcProto per possible proto ID. None of the elements of
@@ -71,7 +73,7 @@ static int null_prepare_tx(struct net_device *dev, struct archdr *pkt,
71 struct ArcProto *arc_proto_map[256], *arc_proto_default, 73 struct ArcProto *arc_proto_map[256], *arc_proto_default,
72 *arc_bcast_proto, *arc_raw_proto; 74 *arc_bcast_proto, *arc_raw_proto;
73 75
74struct ArcProto arc_proto_null = 76static struct ArcProto arc_proto_null =
75{ 77{
76 .suffix = '?', 78 .suffix = '?',
77 .mtu = XMTU, 79 .mtu = XMTU,
@@ -90,7 +92,6 @@ EXPORT_SYMBOL(arc_proto_map);
90EXPORT_SYMBOL(arc_proto_default); 92EXPORT_SYMBOL(arc_proto_default);
91EXPORT_SYMBOL(arc_bcast_proto); 93EXPORT_SYMBOL(arc_bcast_proto);
92EXPORT_SYMBOL(arc_raw_proto); 94EXPORT_SYMBOL(arc_raw_proto);
93EXPORT_SYMBOL(arc_proto_null);
94EXPORT_SYMBOL(arcnet_unregister_proto); 95EXPORT_SYMBOL(arcnet_unregister_proto);
95EXPORT_SYMBOL(arcnet_debug); 96EXPORT_SYMBOL(arcnet_debug);
96EXPORT_SYMBOL(alloc_arcdev); 97EXPORT_SYMBOL(alloc_arcdev);
@@ -118,7 +119,7 @@ static int __init arcnet_init(void)
118 119
119 arcnet_debug = debug; 120 arcnet_debug = debug;
120 121
121 printk(VERSION); 122 printk("arcnet loaded.\n");
122 123
123#ifdef ALPHA_WARNING 124#ifdef ALPHA_WARNING
124 BUGLVL(D_EXTRA) { 125 BUGLVL(D_EXTRA) {
@@ -178,8 +179,8 @@ EXPORT_SYMBOL(arcnet_dump_skb);
178 * Dump the contents of an ARCnet buffer 179 * Dump the contents of an ARCnet buffer
179 */ 180 */
180#if (ARCNET_DEBUG_MAX & (D_RX | D_TX)) 181#if (ARCNET_DEBUG_MAX & (D_RX | D_TX))
181void arcnet_dump_packet(struct net_device *dev, int bufnum, char *desc, 182static void arcnet_dump_packet(struct net_device *dev, int bufnum,
182 int take_arcnet_lock) 183 char *desc, int take_arcnet_lock)
183{ 184{
184 struct arcnet_local *lp = dev->priv; 185 struct arcnet_local *lp = dev->priv;
185 int i, length; 186 int i, length;
@@ -208,7 +209,10 @@ void arcnet_dump_packet(struct net_device *dev, int bufnum, char *desc,
208 209
209} 210}
210 211
211EXPORT_SYMBOL(arcnet_dump_packet); 212#else
213
214#define arcnet_dump_packet(dev, bufnum, desc,take_arcnet_lock) do { } while (0)
215
212#endif 216#endif
213 217
214 218
@@ -733,7 +737,7 @@ static void arcnet_timeout(struct net_device *dev)
733 737
734 spin_unlock_irqrestore(&lp->lock, flags); 738 spin_unlock_irqrestore(&lp->lock, flags);
735 739
736 if (jiffies - lp->last_timeout > 10*HZ) { 740 if (time_after(jiffies, lp->last_timeout + 10*HZ)) {
737 BUGMSG(D_EXTRA, "tx timed out%s (status=%Xh, intmask=%Xh, dest=%02Xh)\n", 741 BUGMSG(D_EXTRA, "tx timed out%s (status=%Xh, intmask=%Xh, dest=%02Xh)\n",
738 msg, status, lp->intmask, lp->lasttrans_dest); 742 msg, status, lp->intmask, lp->lasttrans_dest);
739 lp->last_timeout = jiffies; 743 lp->last_timeout = jiffies;
@@ -996,7 +1000,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id, struct pt_regs *regs)
996 * This is a generic packet receiver that calls arcnet??_rx depending on the 1000 * This is a generic packet receiver that calls arcnet??_rx depending on the
997 * protocol ID found. 1001 * protocol ID found.
998 */ 1002 */
999void arcnet_rx(struct net_device *dev, int bufnum) 1003static void arcnet_rx(struct net_device *dev, int bufnum)
1000{ 1004{
1001 struct arcnet_local *lp = dev->priv; 1005 struct arcnet_local *lp = dev->priv;
1002 struct archdr pkt; 1006 struct archdr pkt;
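
arcnet.c makes its internal helpers (arc_proto_null, arcnet_rx, arcnet_dump_packet) static and drops their EXPORT_SYMBOLs; when the relevant debug classes are compiled out, arcnet_dump_packet is replaced by an empty do { } while (0) macro so existing call sites still compile to nothing. The general pattern, with hypothetical names and a debug mask chosen here to compile the helper out:

#include <linux/netdevice.h>

#define DEMO_D_RX        0x01
#define DEMO_DEBUG_MAX   0x00            /* debug classes compiled out */

#if (DEMO_DEBUG_MAX & DEMO_D_RX)
static void demo_dump_packet(struct net_device *dev, int bufnum, char *desc);
#else
/* Expands to an empty statement, so callers need no #ifdefs of their own. */
#define demo_dump_packet(dev, bufnum, desc) do { } while (0)
#endif
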
diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c
index 6c2c9b9ac6db..43150b2bd13f 100644
--- a/drivers/net/arcnet/com90xx.c
+++ b/drivers/net/arcnet/com90xx.c
@@ -53,7 +53,7 @@
53 53
54 54
55/* Internal function declarations */ 55/* Internal function declarations */
56static int com90xx_found(int ioaddr, int airq, u_long shmem); 56static int com90xx_found(int ioaddr, int airq, u_long shmem, void __iomem *);
57static void com90xx_command(struct net_device *dev, int command); 57static void com90xx_command(struct net_device *dev, int command);
58static int com90xx_status(struct net_device *dev); 58static int com90xx_status(struct net_device *dev);
59static void com90xx_setmask(struct net_device *dev, int mask); 59static void com90xx_setmask(struct net_device *dev, int mask);
@@ -116,14 +116,26 @@ static void __init com90xx_probe(void)
116 unsigned long airqmask; 116 unsigned long airqmask;
117 int ports[(0x3f0 - 0x200) / 16 + 1] = 117 int ports[(0x3f0 - 0x200) / 16 + 1] =
118 {0}; 118 {0};
119 u_long shmems[(0xFF800 - 0xA0000) / 2048 + 1] = 119 unsigned long *shmems;
120 {0}; 120 void __iomem **iomem;
121 int numports, numshmems, *port; 121 int numports, numshmems, *port;
122 u_long *p; 122 u_long *p;
123 int index;
123 124
124 if (!io && !irq && !shmem && !*device && com90xx_skip_probe) 125 if (!io && !irq && !shmem && !*device && com90xx_skip_probe)
125 return; 126 return;
126 127
128 shmems = kzalloc(((0x10000-0xa0000) / 0x800) * sizeof(unsigned long),
129 GFP_KERNEL);
130 if (!shmems)
131 return;
132 iomem = kzalloc(((0x10000-0xa0000) / 0x800) * sizeof(void __iomem *),
133 GFP_KERNEL);
134 if (!iomem) {
135 kfree(shmems);
136 return;
137 }
138
127 BUGLVL(D_NORMAL) printk(VERSION); 139 BUGLVL(D_NORMAL) printk(VERSION);
128 140
129 /* set up the arrays where we'll store the possible probe addresses */ 141 /* set up the arrays where we'll store the possible probe addresses */
@@ -179,6 +191,8 @@ static void __init com90xx_probe(void)
179 191
180 if (!numports) { 192 if (!numports) {
181 BUGMSG2(D_NORMAL, "S1: No ARCnet cards found.\n"); 193 BUGMSG2(D_NORMAL, "S1: No ARCnet cards found.\n");
194 kfree(shmems);
195 kfree(iomem);
182 return; 196 return;
183 } 197 }
184 /* Stage 2: we have now reset any possible ARCnet cards, so we can't 198 /* Stage 2: we have now reset any possible ARCnet cards, so we can't
@@ -202,8 +216,8 @@ static void __init com90xx_probe(void)
202 * 0xD1 byte in the right place, or are read-only. 216 * 0xD1 byte in the right place, or are read-only.
203 */ 217 */
204 numprint = -1; 218 numprint = -1;
205 for (p = &shmems[0]; p < shmems + numshmems; p++) { 219 for (index = 0, p = &shmems[0]; index < numshmems; p++, index++) {
206 u_long ptr = *p; 220 void __iomem *base;
207 221
208 numprint++; 222 numprint++;
209 numprint %= 8; 223 numprint %= 8;
@@ -213,38 +227,49 @@ static void __init com90xx_probe(void)
213 } 227 }
214 BUGMSG2(D_INIT, "%lXh ", *p); 228 BUGMSG2(D_INIT, "%lXh ", *p);
215 229
216 if (!request_mem_region(*p, BUFFER_SIZE, "arcnet (90xx)")) { 230 if (!request_mem_region(*p, MIRROR_SIZE, "arcnet (90xx)")) {
217 BUGMSG2(D_INIT_REASONS, "(request_mem_region)\n"); 231 BUGMSG2(D_INIT_REASONS, "(request_mem_region)\n");
218 BUGMSG2(D_INIT_REASONS, "Stage 3: "); 232 BUGMSG2(D_INIT_REASONS, "Stage 3: ");
219 BUGLVL(D_INIT_REASONS) numprint = 0; 233 BUGLVL(D_INIT_REASONS) numprint = 0;
220 *p-- = shmems[--numshmems]; 234 goto out;
221 continue; 235 }
236 base = ioremap(*p, MIRROR_SIZE);
237 if (!base) {
238 BUGMSG2(D_INIT_REASONS, "(ioremap)\n");
239 BUGMSG2(D_INIT_REASONS, "Stage 3: ");
240 BUGLVL(D_INIT_REASONS) numprint = 0;
241 goto out1;
222 } 242 }
223 if (isa_readb(ptr) != TESTvalue) { 243 if (readb(base) != TESTvalue) {
224 BUGMSG2(D_INIT_REASONS, "(%02Xh != %02Xh)\n", 244 BUGMSG2(D_INIT_REASONS, "(%02Xh != %02Xh)\n",
225 isa_readb(ptr), TESTvalue); 245 readb(base), TESTvalue);
226 BUGMSG2(D_INIT_REASONS, "S3: "); 246 BUGMSG2(D_INIT_REASONS, "S3: ");
227 BUGLVL(D_INIT_REASONS) numprint = 0; 247 BUGLVL(D_INIT_REASONS) numprint = 0;
228 release_mem_region(*p, BUFFER_SIZE); 248 goto out2;
229 *p-- = shmems[--numshmems];
230 continue;
231 } 249 }
232 /* By writing 0x42 to the TESTvalue location, we also make 250 /* By writing 0x42 to the TESTvalue location, we also make
233 * sure no "mirror" shmem areas show up - if they occur 251 * sure no "mirror" shmem areas show up - if they occur
234 * in another pass through this loop, they will be discarded 252 * in another pass through this loop, they will be discarded
235 * because *cptr != TESTvalue. 253 * because *cptr != TESTvalue.
236 */ 254 */
237 isa_writeb(0x42, ptr); 255 writeb(0x42, base);
238 if (isa_readb(ptr) != 0x42) { 256 if (readb(base) != 0x42) {
239 BUGMSG2(D_INIT_REASONS, "(read only)\n"); 257 BUGMSG2(D_INIT_REASONS, "(read only)\n");
240 BUGMSG2(D_INIT_REASONS, "S3: "); 258 BUGMSG2(D_INIT_REASONS, "S3: ");
241 release_mem_region(*p, BUFFER_SIZE); 259 goto out2;
242 *p-- = shmems[--numshmems];
243 continue;
244 } 260 }
245 BUGMSG2(D_INIT_REASONS, "\n"); 261 BUGMSG2(D_INIT_REASONS, "\n");
246 BUGMSG2(D_INIT_REASONS, "S3: "); 262 BUGMSG2(D_INIT_REASONS, "S3: ");
247 BUGLVL(D_INIT_REASONS) numprint = 0; 263 BUGLVL(D_INIT_REASONS) numprint = 0;
264 iomem[index] = base;
265 continue;
266 out2:
267 iounmap(base);
268 out1:
269 release_mem_region(*p, MIRROR_SIZE);
270 out:
271 *p-- = shmems[--numshmems];
272 index--;
248 } 273 }
249 BUGMSG2(D_INIT, "\n"); 274 BUGMSG2(D_INIT, "\n");
250 275
@@ -252,6 +277,8 @@ static void __init com90xx_probe(void)
252 BUGMSG2(D_NORMAL, "S3: No ARCnet cards found.\n"); 277 BUGMSG2(D_NORMAL, "S3: No ARCnet cards found.\n");
253 for (port = &ports[0]; port < ports + numports; port++) 278 for (port = &ports[0]; port < ports + numports; port++)
254 release_region(*port, ARCNET_TOTAL_SIZE); 279 release_region(*port, ARCNET_TOTAL_SIZE);
280 kfree(shmems);
281 kfree(iomem);
255 return; 282 return;
256 } 283 }
257 /* Stage 4: something of a dummy, to report the shmems that are 284 /* Stage 4: something of a dummy, to report the shmems that are
@@ -351,30 +378,32 @@ static void __init com90xx_probe(void)
351 mdelay(RESETtime); 378 mdelay(RESETtime);
352 } else { 379 } else {
353 /* just one shmem and port, assume they match */ 380 /* just one shmem and port, assume they match */
354 isa_writeb(TESTvalue, shmems[0]); 381 writeb(TESTvalue, iomem[0]);
355 } 382 }
356#else 383#else
357 inb(_RESET); 384 inb(_RESET);
358 mdelay(RESETtime); 385 mdelay(RESETtime);
359#endif 386#endif
360 387
361 for (p = &shmems[0]; p < shmems + numshmems; p++) { 388 for (index = 0; index < numshmems; index++) {
362 u_long ptr = *p; 389 u_long ptr = shmems[index];
390 void __iomem *base = iomem[index];
363 391
364 if (isa_readb(ptr) == TESTvalue) { /* found one */ 392 if (readb(base) == TESTvalue) { /* found one */
365 BUGMSG2(D_INIT, "%lXh)\n", *p); 393 BUGMSG2(D_INIT, "%lXh)\n", *p);
366 openparen = 0; 394 openparen = 0;
367 395
368 /* register the card */ 396 /* register the card */
369 if (com90xx_found(*port, airq, *p) == 0) 397 if (com90xx_found(*port, airq, ptr, base) == 0)
370 found = 1; 398 found = 1;
371 numprint = -1; 399 numprint = -1;
372 400
373 /* remove shmem from the list */ 401 /* remove shmem from the list */
374 *p = shmems[--numshmems]; 402 shmems[index] = shmems[--numshmems];
403 iomem[index] = iomem[numshmems];
375 break; /* go to the next I/O port */ 404 break; /* go to the next I/O port */
376 } else { 405 } else {
377 BUGMSG2(D_INIT_REASONS, "%Xh-", isa_readb(ptr)); 406 BUGMSG2(D_INIT_REASONS, "%Xh-", readb(base));
378 } 407 }
379 } 408 }
380 409
@@ -391,17 +420,40 @@ static void __init com90xx_probe(void)
391 BUGLVL(D_INIT_REASONS) printk("\n"); 420 BUGLVL(D_INIT_REASONS) printk("\n");
392 421
393 /* Now put back TESTvalue on all leftover shmems. */ 422 /* Now put back TESTvalue on all leftover shmems. */
394 for (p = &shmems[0]; p < shmems + numshmems; p++) { 423 for (index = 0; index < numshmems; index++) {
395 isa_writeb(TESTvalue, *p); 424 writeb(TESTvalue, iomem[index]);
396 release_mem_region(*p, BUFFER_SIZE); 425 iounmap(iomem[index]);
426 release_mem_region(shmems[index], MIRROR_SIZE);
397 } 427 }
428 kfree(shmems);
429 kfree(iomem);
398} 430}
399 431
432static int check_mirror(unsigned long addr, size_t size)
433{
434 void __iomem *p;
435 int res = -1;
436
437 if (!request_mem_region(addr, size, "arcnet (90xx)"))
438 return -1;
439
440 p = ioremap(addr, size);
441 if (p) {
442 if (readb(p) == TESTvalue)
443 res = 1;
444 else
445 res = 0;
446 iounmap(p);
447 }
448
449 release_mem_region(addr, size);
450 return res;
451}
400 452
401/* Set up the struct net_device associated with this card. Called after 453/* Set up the struct net_device associated with this card. Called after
402 * probing succeeds. 454 * probing succeeds.
403 */ 455 */
404static int __init com90xx_found(int ioaddr, int airq, u_long shmem) 456static int __init com90xx_found(int ioaddr, int airq, u_long shmem, void __iomem *p)
405{ 457{
406 struct net_device *dev = NULL; 458 struct net_device *dev = NULL;
407 struct arcnet_local *lp; 459 struct arcnet_local *lp;
@@ -412,7 +464,8 @@ static int __init com90xx_found(int ioaddr, int airq, u_long shmem)
412 dev = alloc_arcdev(device); 464 dev = alloc_arcdev(device);
413 if (!dev) { 465 if (!dev) {
414 BUGMSG2(D_NORMAL, "com90xx: Can't allocate device!\n"); 466 BUGMSG2(D_NORMAL, "com90xx: Can't allocate device!\n");
415 release_mem_region(shmem, BUFFER_SIZE); 467 iounmap(p);
468 release_mem_region(shmem, MIRROR_SIZE);
416 return -ENOMEM; 469 return -ENOMEM;
417 } 470 }
418 lp = dev->priv; 471 lp = dev->priv;
@@ -423,24 +476,27 @@ static int __init com90xx_found(int ioaddr, int airq, u_long shmem)
423 * 2k (or there are no mirrors at all) but on some, it's 4k. 476 * 2k (or there are no mirrors at all) but on some, it's 4k.
424 */ 477 */
425 mirror_size = MIRROR_SIZE; 478 mirror_size = MIRROR_SIZE;
426 if (isa_readb(shmem) == TESTvalue 479 if (readb(p) == TESTvalue &&
427 && isa_readb(shmem - mirror_size) != TESTvalue 480 check_mirror(shmem - MIRROR_SIZE, MIRROR_SIZE) == 0 &&
428 && isa_readb(shmem - 2 * mirror_size) == TESTvalue) 481 check_mirror(shmem - 2 * MIRROR_SIZE, MIRROR_SIZE) == 1)
429 mirror_size *= 2; 482 mirror_size = 2 * MIRROR_SIZE;
430 483
431 first_mirror = last_mirror = shmem; 484 first_mirror = shmem - mirror_size;
432 while (isa_readb(first_mirror) == TESTvalue) 485 while (check_mirror(first_mirror, mirror_size) == 1)
433 first_mirror -= mirror_size; 486 first_mirror -= mirror_size;
434 first_mirror += mirror_size; 487 first_mirror += mirror_size;
435 488
436 while (isa_readb(last_mirror) == TESTvalue) 489 last_mirror = shmem + mirror_size;
490 while (check_mirror(last_mirror, mirror_size) == 1)
437 last_mirror += mirror_size; 491 last_mirror += mirror_size;
438 last_mirror -= mirror_size; 492 last_mirror -= mirror_size;
439 493
440 dev->mem_start = first_mirror; 494 dev->mem_start = first_mirror;
441 dev->mem_end = last_mirror + MIRROR_SIZE - 1; 495 dev->mem_end = last_mirror + MIRROR_SIZE - 1;
442 496
443 release_mem_region(shmem, BUFFER_SIZE); 497 iounmap(p);
498 release_mem_region(shmem, MIRROR_SIZE);
499
444 if (!request_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1, "arcnet (90xx)")) 500 if (!request_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1, "arcnet (90xx)"))
445 goto err_free_dev; 501 goto err_free_dev;
446 502
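
com90xx_probe() moves its candidate-address tables (a few hundred entries each) off the kernel stack and into kzalloc()'d arrays, freeing both on every exit path. A sketch of that allocate-and-clean-up shape, with illustrative names and a slot count assumed from the 0xA0000-0xFFFFF scan in 2 KB steps:

#include <linux/slab.h>
#include <linux/errno.h>

#define DEMO_SLOTS 192          /* one per 2 KB window in 0xA0000-0xFFFFF */

static int demo_probe(void)
{
        unsigned long *addrs;
        void __iomem **maps;

        addrs = kzalloc(DEMO_SLOTS * sizeof(*addrs), GFP_KERNEL);
        if (!addrs)
                return -ENOMEM;

        maps = kzalloc(DEMO_SLOTS * sizeof(*maps), GFP_KERNEL);
        if (!maps) {
                kfree(addrs);
                return -ENOMEM;
        }

        /* ... scan the bus, recording each claimed window in addrs[]/maps[] ... */

        kfree(maps);
        kfree(addrs);
        return 0;
}
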
diff --git a/drivers/net/arcnet/rfc1051.c b/drivers/net/arcnet/rfc1051.c
index 6d7913704fb5..6d6c69f036ef 100644
--- a/drivers/net/arcnet/rfc1051.c
+++ b/drivers/net/arcnet/rfc1051.c
@@ -43,7 +43,7 @@ static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
43 int bufnum); 43 int bufnum);
44 44
45 45
46struct ArcProto rfc1051_proto = 46static struct ArcProto rfc1051_proto =
47{ 47{
48 .suffix = 's', 48 .suffix = 's',
49 .mtu = XMTU - RFC1051_HDR_SIZE, 49 .mtu = XMTU - RFC1051_HDR_SIZE,
diff --git a/drivers/net/arcnet/rfc1201.c b/drivers/net/arcnet/rfc1201.c
index 6b6ae4bf3d39..bee34226abfa 100644
--- a/drivers/net/arcnet/rfc1201.c
+++ b/drivers/net/arcnet/rfc1201.c
@@ -43,7 +43,7 @@ static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
43 int bufnum); 43 int bufnum);
44static int continue_tx(struct net_device *dev, int bufnum); 44static int continue_tx(struct net_device *dev, int bufnum);
45 45
46struct ArcProto rfc1201_proto = 46static struct ArcProto rfc1201_proto =
47{ 47{
48 .suffix = 'a', 48 .suffix = 'a',
49 .mtu = 1500, /* could be more, but some receivers can't handle it... */ 49 .mtu = 1500, /* could be more, but some receivers can't handle it... */
diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c
index 6a93b666eb72..d52deb8d2075 100644
--- a/drivers/net/arm/etherh.c
+++ b/drivers/net/arm/etherh.c
@@ -46,6 +46,7 @@
46#include <linux/device.h> 46#include <linux/device.h>
47#include <linux/init.h> 47#include <linux/init.h>
48#include <linux/bitops.h> 48#include <linux/bitops.h>
49#include <linux/jiffies.h>
49 50
50#include <asm/system.h> 51#include <asm/system.h>
51#include <asm/ecard.h> 52#include <asm/ecard.h>
@@ -355,7 +356,7 @@ etherh_block_output (struct net_device *dev, int count, const unsigned char *buf
355 dma_start = jiffies; 356 dma_start = jiffies;
356 357
357 while ((readb (addr + EN0_ISR) & ENISR_RDC) == 0) 358 while ((readb (addr + EN0_ISR) & ENISR_RDC) == 0)
358 if (jiffies - dma_start > 2*HZ/100) { /* 20ms */ 359 if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */
359 printk(KERN_ERR "%s: timeout waiting for TX RDC\n", 360 printk(KERN_ERR "%s: timeout waiting for TX RDC\n",
360 dev->name); 361 dev->name);
361 etherh_reset (dev); 362 etherh_reset (dev);
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index a24200d0a616..b787b6582e50 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -46,7 +46,7 @@ typedef enum {
46} board_t; 46} board_t;
47 47
48/* indexed by board_t, above */ 48/* indexed by board_t, above */
49static struct { 49static const struct {
50 char *name; 50 char *name;
51} board_info[] __devinitdata = { 51} board_info[] __devinitdata = {
52 { "Broadcom NetXtreme II BCM5706 1000Base-T" }, 52 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
@@ -3476,7 +3476,7 @@ bnx2_test_registers(struct bnx2 *bp)
3476{ 3476{
3477 int ret; 3477 int ret;
3478 int i; 3478 int i;
3479 static struct { 3479 static const struct {
3480 u16 offset; 3480 u16 offset;
3481 u16 flags; 3481 u16 flags;
3482 u32 rw_mask; 3482 u32 rw_mask;
@@ -3891,7 +3891,7 @@ reg_test_err:
3891static int 3891static int
3892bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size) 3892bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3893{ 3893{
3894 static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555, 3894 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3895 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa }; 3895 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3896 int i; 3896 int i;
3897 3897
@@ -3916,7 +3916,7 @@ bnx2_test_memory(struct bnx2 *bp)
3916{ 3916{
3917 int ret = 0; 3917 int ret = 0;
3918 int i; 3918 int i;
3919 static struct { 3919 static const struct {
3920 u32 offset; 3920 u32 offset;
3921 u32 len; 3921 u32 len;
3922 } mem_tbl[] = { 3922 } mem_tbl[] = {
@@ -5122,7 +5122,7 @@ static struct {
5122 5122
5123#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4) 5123#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
5124 5124
5125static unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = { 5125static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
5126 STATS_OFFSET32(stat_IfHCInOctets_hi), 5126 STATS_OFFSET32(stat_IfHCInOctets_hi),
5127 STATS_OFFSET32(stat_IfHCInBadOctets_hi), 5127 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5128 STATS_OFFSET32(stat_IfHCOutOctets_hi), 5128 STATS_OFFSET32(stat_IfHCOutOctets_hi),
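
bnx2 (like 3c59x, 8139cp and 8139too above) marks its self-test and statistics lookup tables const, and bnx2_fw.h below does the same for the firmware metadata scalars, so the data can be placed in read-only sections instead of writable ones. The pattern, using made-up table contents:

#include <linux/types.h>

static const struct {
        u16 offset;
        u32 rw_mask;
} demo_reg_tbl[] = {
        { 0x0000, 0xffffffff },
        { 0x0404, 0x00ffffff },
};

/* For string tables, const both the array and the strings it points to. */
static const char * const demo_ram_split[] = { "5:3", "3:1", "1:1", "3:5" };
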
diff --git a/drivers/net/bnx2_fw.h b/drivers/net/bnx2_fw.h
index 0c21bd849814..8158974c35a8 100644
--- a/drivers/net/bnx2_fw.h
+++ b/drivers/net/bnx2_fw.h
@@ -14,20 +14,20 @@
14 * accompanying it. 14 * accompanying it.
15 */ 15 */
16 16
17static int bnx2_COM_b06FwReleaseMajor = 0x1; 17static const int bnx2_COM_b06FwReleaseMajor = 0x1;
18static int bnx2_COM_b06FwReleaseMinor = 0x0; 18static const int bnx2_COM_b06FwReleaseMinor = 0x0;
19static int bnx2_COM_b06FwReleaseFix = 0x0; 19static const int bnx2_COM_b06FwReleaseFix = 0x0;
20static u32 bnx2_COM_b06FwStartAddr = 0x080008b4; 20static const u32 bnx2_COM_b06FwStartAddr = 0x080008b4;
21static u32 bnx2_COM_b06FwTextAddr = 0x08000000; 21static const u32 bnx2_COM_b06FwTextAddr = 0x08000000;
22static int bnx2_COM_b06FwTextLen = 0x57bc; 22static const int bnx2_COM_b06FwTextLen = 0x57bc;
23static u32 bnx2_COM_b06FwDataAddr = 0x08005840; 23static const u32 bnx2_COM_b06FwDataAddr = 0x08005840;
24static int bnx2_COM_b06FwDataLen = 0x0; 24static const int bnx2_COM_b06FwDataLen = 0x0;
25static u32 bnx2_COM_b06FwRodataAddr = 0x080057c0; 25static const u32 bnx2_COM_b06FwRodataAddr = 0x080057c0;
26static int bnx2_COM_b06FwRodataLen = 0x58; 26static const int bnx2_COM_b06FwRodataLen = 0x58;
27static u32 bnx2_COM_b06FwBssAddr = 0x08005860; 27static const u32 bnx2_COM_b06FwBssAddr = 0x08005860;
28static int bnx2_COM_b06FwBssLen = 0x88; 28static const int bnx2_COM_b06FwBssLen = 0x88;
29static u32 bnx2_COM_b06FwSbssAddr = 0x08005840; 29static const u32 bnx2_COM_b06FwSbssAddr = 0x08005840;
30static int bnx2_COM_b06FwSbssLen = 0x1c; 30static const int bnx2_COM_b06FwSbssLen = 0x1c;
31static u32 bnx2_COM_b06FwText[(0x57bc/4) + 1] = { 31static u32 bnx2_COM_b06FwText[(0x57bc/4) + 1] = {
32 0x0a00022d, 0x00000000, 0x00000000, 0x0000000d, 0x636f6d20, 0x322e352e, 32 0x0a00022d, 0x00000000, 0x00000000, 0x0000000d, 0x636f6d20, 0x322e352e,
33 0x38000000, 0x02050802, 0x00000000, 0x00000003, 0x00000014, 0x00000032, 33 0x38000000, 0x02050802, 0x00000000, 0x00000003, 0x00000014, 0x00000032,
@@ -2325,20 +2325,20 @@ static u32 bnx2_rv2p_proc2[] = {
2325 0x0000000c, 0x29520000, 0x00000018, 0x80000002, 0x0000000c, 0x29800000, 2325 0x0000000c, 0x29520000, 0x00000018, 0x80000002, 0x0000000c, 0x29800000,
2326 0x00000018, 0x00570000 }; 2326 0x00000018, 0x00570000 };
2327 2327
2328static int bnx2_TPAT_b06FwReleaseMajor = 0x1; 2328static const int bnx2_TPAT_b06FwReleaseMajor = 0x1;
2329static int bnx2_TPAT_b06FwReleaseMinor = 0x0; 2329static const int bnx2_TPAT_b06FwReleaseMinor = 0x0;
2330static int bnx2_TPAT_b06FwReleaseFix = 0x0; 2330static const int bnx2_TPAT_b06FwReleaseFix = 0x0;
2331static u32 bnx2_TPAT_b06FwStartAddr = 0x08000860; 2331static const u32 bnx2_TPAT_b06FwStartAddr = 0x08000860;
2332static u32 bnx2_TPAT_b06FwTextAddr = 0x08000800; 2332static const u32 bnx2_TPAT_b06FwTextAddr = 0x08000800;
2333static int bnx2_TPAT_b06FwTextLen = 0x122c; 2333static const int bnx2_TPAT_b06FwTextLen = 0x122c;
2334static u32 bnx2_TPAT_b06FwDataAddr = 0x08001a60; 2334static const u32 bnx2_TPAT_b06FwDataAddr = 0x08001a60;
2335static int bnx2_TPAT_b06FwDataLen = 0x0; 2335static const int bnx2_TPAT_b06FwDataLen = 0x0;
2336static u32 bnx2_TPAT_b06FwRodataAddr = 0x00000000; 2336static const u32 bnx2_TPAT_b06FwRodataAddr = 0x00000000;
2337static int bnx2_TPAT_b06FwRodataLen = 0x0; 2337static const int bnx2_TPAT_b06FwRodataLen = 0x0;
2338static u32 bnx2_TPAT_b06FwBssAddr = 0x08001aa0; 2338static const u32 bnx2_TPAT_b06FwBssAddr = 0x08001aa0;
2339static int bnx2_TPAT_b06FwBssLen = 0x250; 2339static const int bnx2_TPAT_b06FwBssLen = 0x250;
2340static u32 bnx2_TPAT_b06FwSbssAddr = 0x08001a60; 2340static const u32 bnx2_TPAT_b06FwSbssAddr = 0x08001a60;
2341static int bnx2_TPAT_b06FwSbssLen = 0x34; 2341static const int bnx2_TPAT_b06FwSbssLen = 0x34;
2342static u32 bnx2_TPAT_b06FwText[(0x122c/4) + 1] = { 2342static u32 bnx2_TPAT_b06FwText[(0x122c/4) + 1] = {
2343 0x0a000218, 0x00000000, 0x00000000, 0x0000000d, 0x74706174, 0x20322e35, 2343 0x0a000218, 0x00000000, 0x00000000, 0x0000000d, 0x74706174, 0x20322e35,
2344 0x2e313100, 0x02050b01, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 2344 0x2e313100, 0x02050b01, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -2540,20 +2540,20 @@ static u32 bnx2_TPAT_b06FwRodata[(0x0/4) + 1] = { 0x0 };
2540static u32 bnx2_TPAT_b06FwBss[(0x250/4) + 1] = { 0x0 }; 2540static u32 bnx2_TPAT_b06FwBss[(0x250/4) + 1] = { 0x0 };
2541static u32 bnx2_TPAT_b06FwSbss[(0x34/4) + 1] = { 0x0 }; 2541static u32 bnx2_TPAT_b06FwSbss[(0x34/4) + 1] = { 0x0 };
2542 2542
2543static int bnx2_TXP_b06FwReleaseMajor = 0x1; 2543static const int bnx2_TXP_b06FwReleaseMajor = 0x1;
2544static int bnx2_TXP_b06FwReleaseMinor = 0x0; 2544static const int bnx2_TXP_b06FwReleaseMinor = 0x0;
2545static int bnx2_TXP_b06FwReleaseFix = 0x0; 2545static const int bnx2_TXP_b06FwReleaseFix = 0x0;
2546static u32 bnx2_TXP_b06FwStartAddr = 0x080034b0; 2546static const u32 bnx2_TXP_b06FwStartAddr = 0x080034b0;
2547static u32 bnx2_TXP_b06FwTextAddr = 0x08000000; 2547static const u32 bnx2_TXP_b06FwTextAddr = 0x08000000;
2548static int bnx2_TXP_b06FwTextLen = 0x5748; 2548static const int bnx2_TXP_b06FwTextLen = 0x5748;
2549static u32 bnx2_TXP_b06FwDataAddr = 0x08005760; 2549static const u32 bnx2_TXP_b06FwDataAddr = 0x08005760;
2550static int bnx2_TXP_b06FwDataLen = 0x0; 2550static const int bnx2_TXP_b06FwDataLen = 0x0;
2551static u32 bnx2_TXP_b06FwRodataAddr = 0x00000000; 2551static const u32 bnx2_TXP_b06FwRodataAddr = 0x00000000;
2552static int bnx2_TXP_b06FwRodataLen = 0x0; 2552static const int bnx2_TXP_b06FwRodataLen = 0x0;
2553static u32 bnx2_TXP_b06FwBssAddr = 0x080057a0; 2553static const u32 bnx2_TXP_b06FwBssAddr = 0x080057a0;
2554static int bnx2_TXP_b06FwBssLen = 0x1c4; 2554static const int bnx2_TXP_b06FwBssLen = 0x1c4;
2555static u32 bnx2_TXP_b06FwSbssAddr = 0x08005760; 2555static const u32 bnx2_TXP_b06FwSbssAddr = 0x08005760;
2556static int bnx2_TXP_b06FwSbssLen = 0x38; 2556static const int bnx2_TXP_b06FwSbssLen = 0x38;
2557static u32 bnx2_TXP_b06FwText[(0x5748/4) + 1] = { 2557static u32 bnx2_TXP_b06FwText[(0x5748/4) + 1] = {
2558 0x0a000d2c, 0x00000000, 0x00000000, 0x0000000d, 0x74787020, 0x322e352e, 2558 0x0a000d2c, 0x00000000, 0x00000000, 0x0000000d, 0x74787020, 0x322e352e,
2559 0x38000000, 0x02050800, 0x0000000a, 0x000003e8, 0x0000ea60, 0x00000000, 2559 0x38000000, 0x02050800, 0x0000000a, 0x000003e8, 0x0000ea60, 0x00000000,
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index f2a63186ae05..e83bc825f6af 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1261,7 +1261,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
1261 struct ethhdr *eth_data; 1261 struct ethhdr *eth_data;
1262 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); 1262 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
1263 struct slave *tx_slave = NULL; 1263 struct slave *tx_slave = NULL;
1264 static u32 ip_bcast = 0xffffffff; 1264 static const u32 ip_bcast = 0xffffffff;
1265 int hash_size = 0; 1265 int hash_size = 0;
1266 int do_tx_balance = 1; 1266 int do_tx_balance = 1;
1267 u32 hash_index = 0; 1267 u32 hash_index = 0;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index bcf9f17daf0d..2d0ac169a86c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -131,7 +131,7 @@ MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form");
131 131
132/*----------------------------- Global variables ----------------------------*/ 132/*----------------------------- Global variables ----------------------------*/
133 133
134static const char *version = 134static const char * const version =
135 DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"; 135 DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n";
136 136
137LIST_HEAD(bond_dev_list); 137LIST_HEAD(bond_dev_list);
@@ -1040,6 +1040,10 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
1040 if ((bond->params.mode == BOND_MODE_TLB) || 1040 if ((bond->params.mode == BOND_MODE_TLB) ||
1041 (bond->params.mode == BOND_MODE_ALB)) { 1041 (bond->params.mode == BOND_MODE_ALB)) {
1042 bond_alb_handle_active_change(bond, new_active); 1042 bond_alb_handle_active_change(bond, new_active);
1043 if (old_active)
1044 bond_set_slave_inactive_flags(old_active);
1045 if (new_active)
1046 bond_set_slave_active_flags(new_active);
1043 } else { 1047 } else {
1044 bond->curr_active_slave = new_active; 1048 bond->curr_active_slave = new_active;
1045 } 1049 }
@@ -1443,15 +1447,16 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1443 1447
1444 switch (bond->params.mode) { 1448 switch (bond->params.mode) {
1445 case BOND_MODE_ACTIVEBACKUP: 1449 case BOND_MODE_ACTIVEBACKUP:
1446 /* if we're in active-backup mode, we need one and only one active 1450 /* if we're in active-backup mode, we need one and
1447 * interface. The backup interfaces will have their NOARP flag set 1451 * only one active interface. The backup interfaces
1448 * because we need them to be completely deaf and not to respond to 1452 * will have their SLAVE_INACTIVE flag set because we
1449 * any ARP request on the network to avoid fooling a switch. Thus, 1453 * need them to drop all packets. Thus, since we
1450 * since we guarantee that curr_active_slave always point to 1454 * guarantee that curr_active_slave always points to
1451 * usable interface, we just have to verify this interface's flag. 1455 * the last usable interface, we just have to verify
1456 * this interface's flag.
1452 */ 1457 */
1453 if (((!bond->curr_active_slave) || 1458 if (((!bond->curr_active_slave) ||
1454 (bond->curr_active_slave->dev->flags & IFF_NOARP)) && 1459 (bond->curr_active_slave->dev->priv_flags & IFF_SLAVE_INACTIVE)) &&
1455 (new_slave->link != BOND_LINK_DOWN)) { 1460 (new_slave->link != BOND_LINK_DOWN)) {
1456 dprintk("This is the first active slave\n"); 1461 dprintk("This is the first active slave\n");
1457 /* first slave or no active slave yet, and this link 1462 /* first slave or no active slave yet, and this link
@@ -1492,6 +1497,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1492 * is OK, so make this interface the active one 1497 * is OK, so make this interface the active one
1493 */ 1498 */
1494 bond_change_active_slave(bond, new_slave); 1499 bond_change_active_slave(bond, new_slave);
1500 } else {
1501 bond_set_slave_inactive_flags(new_slave);
1495 } 1502 }
1496 break; 1503 break;
1497 default: 1504 default:
@@ -1724,13 +1731,8 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1724 addr.sa_family = slave_dev->type; 1731 addr.sa_family = slave_dev->type;
1725 dev_set_mac_address(slave_dev, &addr); 1732 dev_set_mac_address(slave_dev, &addr);
1726 1733
1727 /* restore the original state of the 1734 slave_dev->priv_flags &= ~(IFF_MASTER_8023AD | IFF_MASTER_ALB |
1728 * IFF_NOARP flag that might have been 1735 IFF_SLAVE_INACTIVE);
1729 * set by bond_set_slave_inactive_flags()
1730 */
1731 if ((slave->original_flags & IFF_NOARP) == 0) {
1732 slave_dev->flags &= ~IFF_NOARP;
1733 }
1734 1736
1735 kfree(slave); 1737 kfree(slave);
1736 1738
@@ -1816,12 +1818,8 @@ static int bond_release_all(struct net_device *bond_dev)
1816 addr.sa_family = slave_dev->type; 1818 addr.sa_family = slave_dev->type;
1817 dev_set_mac_address(slave_dev, &addr); 1819 dev_set_mac_address(slave_dev, &addr);
1818 1820
1819 /* restore the original state of the IFF_NOARP flag that might have 1821 slave_dev->priv_flags &= ~(IFF_MASTER_8023AD | IFF_MASTER_ALB |
1820 * been set by bond_set_slave_inactive_flags() 1822 IFF_SLAVE_INACTIVE);
1821 */
1822 if ((slave->original_flags & IFF_NOARP) == 0) {
1823 slave_dev->flags &= ~IFF_NOARP;
1824 }
1825 1823
1826 kfree(slave); 1824 kfree(slave);
1827 1825
@@ -4061,14 +4059,17 @@ void bond_set_mode_ops(struct bonding *bond, int mode)
4061 bond_dev->hard_start_xmit = bond_xmit_broadcast; 4059 bond_dev->hard_start_xmit = bond_xmit_broadcast;
4062 break; 4060 break;
4063 case BOND_MODE_8023AD: 4061 case BOND_MODE_8023AD:
4062 bond_set_master_3ad_flags(bond);
4064 bond_dev->hard_start_xmit = bond_3ad_xmit_xor; 4063 bond_dev->hard_start_xmit = bond_3ad_xmit_xor;
4065 if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34) 4064 if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34)
4066 bond->xmit_hash_policy = bond_xmit_hash_policy_l34; 4065 bond->xmit_hash_policy = bond_xmit_hash_policy_l34;
4067 else 4066 else
4068 bond->xmit_hash_policy = bond_xmit_hash_policy_l2; 4067 bond->xmit_hash_policy = bond_xmit_hash_policy_l2;
4069 break; 4068 break;
4070 case BOND_MODE_TLB:
4071 case BOND_MODE_ALB: 4069 case BOND_MODE_ALB:
4070 bond_set_master_alb_flags(bond);
4071 /* FALLTHRU */
4072 case BOND_MODE_TLB:
4072 bond_dev->hard_start_xmit = bond_alb_xmit; 4073 bond_dev->hard_start_xmit = bond_alb_xmit;
4073 bond_dev->set_mac_address = bond_alb_set_mac_address; 4074 bond_dev->set_mac_address = bond_alb_set_mac_address;
4074 break; 4075 break;
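The bond_main.c hunks make the inactive/active state of slaves explicit: a newly enslaved interface that does not become the active one is flagged inactive, and a failover in TLB/ALB mode flips the flags on both the old and the new active slave. A compressed, self-contained sketch of that failover step (the structures are reduced to what the sketch needs and do not mirror the real bonding structures):

    struct slave_s {
            int inactive;
    };

    struct bond_s {
            struct slave_s *curr_active;
    };

    static void set_inactive(struct slave_s *s) { if (s) s->inactive = 1; }
    static void set_active(struct slave_s *s)   { if (s) s->inactive = 0; }

    /* Failover: demote the old active slave, promote the new one. */
    static void change_active(struct bond_s *bond, struct slave_s *new_active)
    {
            struct slave_s *old_active = bond->curr_active;

            set_inactive(old_active);
            set_active(new_active);
            bond->curr_active = new_active;
    }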
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 041bcc583557..5a9bd95884be 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -424,6 +424,12 @@ static ssize_t bonding_store_mode(struct class_device *cd, const char *buf, size
424 ret = -EINVAL; 424 ret = -EINVAL;
425 goto out; 425 goto out;
426 } else { 426 } else {
427 if (bond->params.mode == BOND_MODE_8023AD)
428 bond_unset_master_3ad_flags(bond);
429
430 if (bond->params.mode == BOND_MODE_ALB)
431 bond_unset_master_alb_flags(bond);
432
427 bond->params.mode = new_value; 433 bond->params.mode = new_value;
428 bond_set_mode_ops(bond, bond->params.mode); 434 bond_set_mode_ops(bond, bond->params.mode);
429 printk(KERN_INFO DRV_NAME ": %s: setting mode to %s (%d).\n", 435 printk(KERN_INFO DRV_NAME ": %s: setting mode to %s (%d).\n",
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 3dd78d048c3e..ce9dc9b4e2dc 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -22,8 +22,8 @@
22#include "bond_3ad.h" 22#include "bond_3ad.h"
23#include "bond_alb.h" 23#include "bond_alb.h"
24 24
25#define DRV_VERSION "3.0.1" 25#define DRV_VERSION "3.0.2"
26#define DRV_RELDATE "January 9, 2006" 26#define DRV_RELDATE "February 21, 2006"
27#define DRV_NAME "bonding" 27#define DRV_NAME "bonding"
28#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" 28#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
29 29
@@ -230,14 +230,37 @@ static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
230 230
231static inline void bond_set_slave_inactive_flags(struct slave *slave) 231static inline void bond_set_slave_inactive_flags(struct slave *slave)
232{ 232{
233 slave->state = BOND_STATE_BACKUP; 233 struct bonding *bond = slave->dev->master->priv;
234 slave->dev->flags |= IFF_NOARP; 234 if (bond->params.mode != BOND_MODE_TLB &&
235 bond->params.mode != BOND_MODE_ALB)
236 slave->state = BOND_STATE_BACKUP;
237 slave->dev->priv_flags |= IFF_SLAVE_INACTIVE;
235} 238}
236 239
237static inline void bond_set_slave_active_flags(struct slave *slave) 240static inline void bond_set_slave_active_flags(struct slave *slave)
238{ 241{
239 slave->state = BOND_STATE_ACTIVE; 242 slave->state = BOND_STATE_ACTIVE;
240 slave->dev->flags &= ~IFF_NOARP; 243 slave->dev->priv_flags &= ~IFF_SLAVE_INACTIVE;
244}
245
246static inline void bond_set_master_3ad_flags(struct bonding *bond)
247{
248 bond->dev->priv_flags |= IFF_MASTER_8023AD;
249}
250
251static inline void bond_unset_master_3ad_flags(struct bonding *bond)
252{
253 bond->dev->priv_flags &= ~IFF_MASTER_8023AD;
254}
255
256static inline void bond_set_master_alb_flags(struct bonding *bond)
257{
258 bond->dev->priv_flags |= IFF_MASTER_ALB;
259}
260
261static inline void bond_unset_master_alb_flags(struct bonding *bond)
262{
263 bond->dev->priv_flags &= ~IFF_MASTER_ALB;
241} 264}
242 265
243struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr); 266struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
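The bonding.h hunk replaces the old trick of setting IFF_NOARP in dev->flags on backup slaves with a private bit in dev->priv_flags, plus per-mode master flags. Because priv_flags is kernel-internal and not visible to userspace, the driver no longer has to save and restore the slave's original NOARP setting on release, which is exactly what the bond_release hunks above delete. A condensed sketch of the helper pattern, assuming IFF_SLAVE_INACTIVE is a priv_flags bit defined elsewhere in this patch:

    #include <linux/netdevice.h>

    /* Mark a slave inactive without touching dev->flags, so userspace
     * still sees a normal, ARP-capable interface. */
    static inline void slave_mark_inactive(struct net_device *slave_dev)
    {
            slave_dev->priv_flags |= IFF_SLAVE_INACTIVE;
    }

    static inline void slave_mark_active(struct net_device *slave_dev)
    {
            slave_dev->priv_flags &= ~IFF_SLAVE_INACTIVE;
    }

    /* Receive-path check: frames arriving on an inactive slave are dropped. */
    static inline int slave_is_inactive(const struct net_device *slave_dev)
    {
            return (slave_dev->priv_flags & IFF_SLAVE_INACTIVE) != 0;
    }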
diff --git a/drivers/net/chelsio/espi.c b/drivers/net/chelsio/espi.c
index e824acaf188a..542e5e065c6f 100644
--- a/drivers/net/chelsio/espi.c
+++ b/drivers/net/chelsio/espi.c
@@ -87,15 +87,9 @@ static int tricn_write(adapter_t *adapter, int bundle_addr, int module_addr,
87static int tricn_init(adapter_t *adapter) 87static int tricn_init(adapter_t *adapter)
88{ 88{
89 int i = 0; 89 int i = 0;
90 int sme = 1;
91 int stat = 0; 90 int stat = 0;
92 int timeout = 0; 91 int timeout = 0;
93 int is_ready = 0; 92 int is_ready = 0;
94 int dynamic_deskew = 0;
95
96 if (dynamic_deskew)
97 sme = 0;
98
99 93
100 /* 1 */ 94 /* 1 */
101 timeout=1000; 95 timeout=1000;
@@ -113,11 +107,9 @@ static int tricn_init(adapter_t *adapter)
113 } 107 }
114 108
115 /* 2 */ 109 /* 2 */
116 if (sme) { 110 tricn_write(adapter, 0, 0, 0, TRICN_CNFG, 0x81);
117 tricn_write(adapter, 0, 0, 0, TRICN_CNFG, 0x81); 111 tricn_write(adapter, 0, 1, 0, TRICN_CNFG, 0x81);
118 tricn_write(adapter, 0, 1, 0, TRICN_CNFG, 0x81); 112 tricn_write(adapter, 0, 2, 0, TRICN_CNFG, 0x81);
119 tricn_write(adapter, 0, 2, 0, TRICN_CNFG, 0x81);
120 }
121 for (i=1; i<= 8; i++) tricn_write(adapter, 0, 0, i, TRICN_CNFG, 0xf1); 113 for (i=1; i<= 8; i++) tricn_write(adapter, 0, 0, i, TRICN_CNFG, 0xf1);
122 for (i=1; i<= 2; i++) tricn_write(adapter, 0, 1, i, TRICN_CNFG, 0xf1); 114 for (i=1; i<= 2; i++) tricn_write(adapter, 0, 1, i, TRICN_CNFG, 0xf1);
123 for (i=1; i<= 3; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1); 115 for (i=1; i<= 3; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1);
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
index 1ebb5d149aef..12e4e96dba2d 100644
--- a/drivers/net/chelsio/subr.c
+++ b/drivers/net/chelsio/subr.c
@@ -686,7 +686,7 @@ int t1_init_hw_modules(adapter_t *adapter)
686 */ 686 */
687static void __devinit get_pci_mode(adapter_t *adapter, struct chelsio_pci_params *p) 687static void __devinit get_pci_mode(adapter_t *adapter, struct chelsio_pci_params *p)
688{ 688{
689 static unsigned short speed_map[] = { 33, 66, 100, 133 }; 689 static const unsigned short speed_map[] = { 33, 66, 100, 133 };
690 u32 pci_mode; 690 u32 pci_mode;
691 691
692 pci_read_config_dword(adapter->pdev, A_PCICFG_MODE, &pci_mode); 692 pci_read_config_dword(adapter->pdev, A_PCICFG_MODE, &pci_mode);
diff --git a/drivers/net/dgrs.c b/drivers/net/dgrs.c
index 70b47e4c4e9c..32d13166c6e8 100644
--- a/drivers/net/dgrs.c
+++ b/drivers/net/dgrs.c
@@ -993,7 +993,7 @@ dgrs_download(struct net_device *dev0)
993 int is; 993 int is;
994 unsigned long i; 994 unsigned long i;
995 995
996 static int iv2is[16] = { 996 static const int iv2is[16] = {
997 0, 0, 0, ES4H_IS_INT3, 997 0, 0, 0, ES4H_IS_INT3,
998 0, ES4H_IS_INT5, 0, ES4H_IS_INT7, 998 0, ES4H_IS_INT5, 0, ES4H_IS_INT7,
999 0, 0, ES4H_IS_INT10, ES4H_IS_INT11, 999 0, 0, ES4H_IS_INT10, ES4H_IS_INT11,
diff --git a/drivers/net/dgrs_firmware.c b/drivers/net/dgrs_firmware.c
index 1e49e1e1f201..8c20d4c99937 100644
--- a/drivers/net/dgrs_firmware.c
+++ b/drivers/net/dgrs_firmware.c
@@ -1,4 +1,4 @@
1static int dgrs_firmnum = 550; 1static const int dgrs_firmnum = 550;
2static char dgrs_firmver[] = "$Version$"; 2static char dgrs_firmver[] = "$Version$";
3static char dgrs_firmdate[] = "11/16/96 03:45:15"; 3static char dgrs_firmdate[] = "11/16/96 03:45:15";
4static unsigned char dgrs_code[] __initdata = { 4static unsigned char dgrs_code[] __initdata = {
@@ -9963,4 +9963,4 @@ static unsigned char dgrs_code[] __initdata = {
9963 109,46,99,0,114,99,0,0,48,120,0,0, 9963 109,46,99,0,114,99,0,0,48,120,0,0,
9964 0,0,0,0,0,0,0,0,0,0,0,0 9964 0,0,0,0,0,0,0,0,0,0,0,0
9965 } ; 9965 } ;
9966static int dgrs_ncode = 119520 ; 9966static const int dgrs_ncode = 119520 ;
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index fb9dae302dcc..1f3627470c95 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -90,8 +90,8 @@ module_param(tx_coalesce, int, 0); /* HW xmit count each TxDMAComplete */
90#define EnableInt() \ 90#define EnableInt() \
91writew(DEFAULT_INTR, ioaddr + IntEnable) 91writew(DEFAULT_INTR, ioaddr + IntEnable)
92 92
93static int max_intrloop = 50; 93static const int max_intrloop = 50;
94static int multicast_filter_limit = 0x40; 94static const int multicast_filter_limit = 0x40;
95 95
96static int rio_open (struct net_device *dev); 96static int rio_open (struct net_device *dev);
97static void rio_timer (unsigned long data); 97static void rio_timer (unsigned long data);
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index f57a85feda3d..31ac001f5517 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -598,8 +598,8 @@ static void e100_enable_irq(struct nic *nic)
598 598
599 spin_lock_irqsave(&nic->cmd_lock, flags); 599 spin_lock_irqsave(&nic->cmd_lock, flags);
600 writeb(irq_mask_none, &nic->csr->scb.cmd_hi); 600 writeb(irq_mask_none, &nic->csr->scb.cmd_hi);
601 spin_unlock_irqrestore(&nic->cmd_lock, flags);
602 e100_write_flush(nic); 601 e100_write_flush(nic);
602 spin_unlock_irqrestore(&nic->cmd_lock, flags);
603} 603}
604 604
605static void e100_disable_irq(struct nic *nic) 605static void e100_disable_irq(struct nic *nic)
@@ -608,8 +608,8 @@ static void e100_disable_irq(struct nic *nic)
608 608
609 spin_lock_irqsave(&nic->cmd_lock, flags); 609 spin_lock_irqsave(&nic->cmd_lock, flags);
610 writeb(irq_mask_all, &nic->csr->scb.cmd_hi); 610 writeb(irq_mask_all, &nic->csr->scb.cmd_hi);
611 spin_unlock_irqrestore(&nic->cmd_lock, flags);
612 e100_write_flush(nic); 611 e100_write_flush(nic);
612 spin_unlock_irqrestore(&nic->cmd_lock, flags);
613} 613}
614 614
615static void e100_hw_reset(struct nic *nic) 615static void e100_hw_reset(struct nic *nic)
@@ -1582,8 +1582,8 @@ static void e100_watchdog(unsigned long data)
1582 * interrupt mask bit and the SW Interrupt generation bit */ 1582 * interrupt mask bit and the SW Interrupt generation bit */
1583 spin_lock_irq(&nic->cmd_lock); 1583 spin_lock_irq(&nic->cmd_lock);
1584 writeb(readb(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi); 1584 writeb(readb(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
1585 spin_unlock_irq(&nic->cmd_lock);
1586 e100_write_flush(nic); 1585 e100_write_flush(nic);
1586 spin_unlock_irq(&nic->cmd_lock);
1587 1587
1588 e100_update_stats(nic); 1588 e100_update_stats(nic);
1589 e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex); 1589 e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex);
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 99baf0e099fc..281de41d030a 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -83,10 +83,6 @@
83struct e1000_adapter; 83struct e1000_adapter;
84 84
85#include "e1000_hw.h" 85#include "e1000_hw.h"
86#ifdef CONFIG_E1000_MQ
87#include <linux/cpu.h>
88#include <linux/smp.h>
89#endif
90 86
91#ifdef DBG 87#ifdef DBG
92#define E1000_DBG(args...) printk(KERN_DEBUG "e1000: " args) 88#define E1000_DBG(args...) printk(KERN_DEBUG "e1000: " args)
@@ -169,12 +165,6 @@ struct e1000_buffer {
169 uint16_t next_to_watch; 165 uint16_t next_to_watch;
170}; 166};
171 167
172#ifdef CONFIG_E1000_MQ
173struct e1000_queue_stats {
174 uint64_t packets;
175 uint64_t bytes;
176};
177#endif
178 168
179struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; }; 169struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; };
180struct e1000_ps_page_dma { uint64_t ps_page_dma[PS_PAGE_BUFFERS]; }; 170struct e1000_ps_page_dma { uint64_t ps_page_dma[PS_PAGE_BUFFERS]; };
@@ -198,12 +188,7 @@ struct e1000_tx_ring {
198 spinlock_t tx_lock; 188 spinlock_t tx_lock;
199 uint16_t tdh; 189 uint16_t tdh;
200 uint16_t tdt; 190 uint16_t tdt;
201
202 boolean_t last_tx_tso; 191 boolean_t last_tx_tso;
203
204#ifdef CONFIG_E1000_MQ
205 struct e1000_queue_stats tx_stats;
206#endif
207}; 192};
208 193
209struct e1000_rx_ring { 194struct e1000_rx_ring {
@@ -230,9 +215,6 @@ struct e1000_rx_ring {
230 215
231 uint16_t rdh; 216 uint16_t rdh;
232 uint16_t rdt; 217 uint16_t rdt;
233#ifdef CONFIG_E1000_MQ
234 struct e1000_queue_stats rx_stats;
235#endif
236}; 218};
237 219
238#define E1000_DESC_UNUSED(R) \ 220#define E1000_DESC_UNUSED(R) \
@@ -260,6 +242,7 @@ struct e1000_adapter {
260 uint32_t rx_buffer_len; 242 uint32_t rx_buffer_len;
261 uint32_t part_num; 243 uint32_t part_num;
262 uint32_t wol; 244 uint32_t wol;
245 uint32_t ksp3_port_a;
263 uint32_t smartspeed; 246 uint32_t smartspeed;
264 uint32_t en_mng_pt; 247 uint32_t en_mng_pt;
265 uint16_t link_speed; 248 uint16_t link_speed;
@@ -269,8 +252,8 @@ struct e1000_adapter {
269 spinlock_t tx_queue_lock; 252 spinlock_t tx_queue_lock;
270#endif 253#endif
271 atomic_t irq_sem; 254 atomic_t irq_sem;
272 struct work_struct tx_timeout_task;
273 struct work_struct watchdog_task; 255 struct work_struct watchdog_task;
256 struct work_struct reset_task;
274 uint8_t fc_autoneg; 257 uint8_t fc_autoneg;
275 258
276 struct timer_list blink_timer; 259 struct timer_list blink_timer;
@@ -278,9 +261,6 @@ struct e1000_adapter {
278 261
279 /* TX */ 262 /* TX */
280 struct e1000_tx_ring *tx_ring; /* One per active queue */ 263 struct e1000_tx_ring *tx_ring; /* One per active queue */
281#ifdef CONFIG_E1000_MQ
282 struct e1000_tx_ring **cpu_tx_ring; /* per-cpu */
283#endif
284 unsigned long tx_queue_len; 264 unsigned long tx_queue_len;
285 uint32_t txd_cmd; 265 uint32_t txd_cmd;
286 uint32_t tx_int_delay; 266 uint32_t tx_int_delay;
@@ -301,24 +281,19 @@ struct e1000_adapter {
301 /* RX */ 281 /* RX */
302#ifdef CONFIG_E1000_NAPI 282#ifdef CONFIG_E1000_NAPI
303 boolean_t (*clean_rx) (struct e1000_adapter *adapter, 283 boolean_t (*clean_rx) (struct e1000_adapter *adapter,
304 struct e1000_rx_ring *rx_ring, 284 struct e1000_rx_ring *rx_ring,
305 int *work_done, int work_to_do); 285 int *work_done, int work_to_do);
306#else 286#else
307 boolean_t (*clean_rx) (struct e1000_adapter *adapter, 287 boolean_t (*clean_rx) (struct e1000_adapter *adapter,
308 struct e1000_rx_ring *rx_ring); 288 struct e1000_rx_ring *rx_ring);
309#endif 289#endif
310 void (*alloc_rx_buf) (struct e1000_adapter *adapter, 290 void (*alloc_rx_buf) (struct e1000_adapter *adapter,
311 struct e1000_rx_ring *rx_ring, 291 struct e1000_rx_ring *rx_ring,
312 int cleaned_count); 292 int cleaned_count);
313 struct e1000_rx_ring *rx_ring; /* One per active queue */ 293 struct e1000_rx_ring *rx_ring; /* One per active queue */
314#ifdef CONFIG_E1000_NAPI 294#ifdef CONFIG_E1000_NAPI
315 struct net_device *polling_netdev; /* One per active queue */ 295 struct net_device *polling_netdev; /* One per active queue */
316#endif 296#endif
317#ifdef CONFIG_E1000_MQ
318 struct net_device **cpu_netdev; /* per-cpu */
319 struct call_async_data_struct rx_sched_call_data;
320 cpumask_t cpumask;
321#endif
322 int num_tx_queues; 297 int num_tx_queues;
323 int num_rx_queues; 298 int num_rx_queues;
324 299
@@ -353,10 +328,37 @@ struct e1000_adapter {
353 struct e1000_rx_ring test_rx_ring; 328 struct e1000_rx_ring test_rx_ring;
354 329
355 330
356 u32 *config_space; 331 uint32_t *config_space;
357 int msg_enable; 332 int msg_enable;
358#ifdef CONFIG_PCI_MSI 333#ifdef CONFIG_PCI_MSI
359 boolean_t have_msi; 334 boolean_t have_msi;
360#endif 335#endif
336 /* to not mess up cache alignment, always add to the bottom */
337 boolean_t txb2b;
338#ifdef NETIF_F_TSO
339 boolean_t tso_force;
340#endif
361}; 341};
342
343
344/* e1000_main.c */
345extern char e1000_driver_name[];
346extern char e1000_driver_version[];
347int e1000_up(struct e1000_adapter *adapter);
348void e1000_down(struct e1000_adapter *adapter);
349void e1000_reset(struct e1000_adapter *adapter);
350int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
351void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
352int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
353void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
354void e1000_update_stats(struct e1000_adapter *adapter);
355int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
356
357/* e1000_ethtool.c */
358void e1000_set_ethtool_ops(struct net_device *netdev);
359
360/* e1000_param.c */
361void e1000_check_options(struct e1000_adapter *adapter);
362
363
362#endif /* _E1000_H_ */ 364#endif /* _E1000_H_ */
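The e1000.h hunk centralizes the cross-file prototypes that e1000_ethtool.c (next diff) used to re-declare with extern. Keeping a single copy in the header lets the compiler catch any signature drift between e1000_main.c and its callers. A generic illustration of the pattern; all names here are hypothetical:

    /* foo.h — one home for the shared prototypes */
    #ifndef _FOO_H_
    #define _FOO_H_

    struct foo_adapter {
            int link_up;
    };

    int foo_up(struct foo_adapter *adapter);
    void foo_down(struct foo_adapter *adapter);

    #endif /* _FOO_H_ */

    /* foo_ethtool.c — includes foo.h instead of repeating "extern" lines,
     * so a signature change in foo_main.c is caught at compile time. */
    #include "foo.h"

    static int foo_reset(struct foo_adapter *adapter)
    {
            foo_down(adapter);
            return foo_up(adapter);
    }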
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index 5cedc81786e3..ecccca35c6f4 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -32,19 +32,6 @@
32 32
33#include <asm/uaccess.h> 33#include <asm/uaccess.h>
34 34
35extern char e1000_driver_name[];
36extern char e1000_driver_version[];
37
38extern int e1000_up(struct e1000_adapter *adapter);
39extern void e1000_down(struct e1000_adapter *adapter);
40extern void e1000_reset(struct e1000_adapter *adapter);
41extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
42extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
43extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
44extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
45extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
46extern void e1000_update_stats(struct e1000_adapter *adapter);
47
48struct e1000_stats { 35struct e1000_stats {
49 char stat_string[ETH_GSTRING_LEN]; 36 char stat_string[ETH_GSTRING_LEN];
50 int sizeof_stat; 37 int sizeof_stat;
@@ -60,7 +47,6 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
60 { "tx_bytes", E1000_STAT(net_stats.tx_bytes) }, 47 { "tx_bytes", E1000_STAT(net_stats.tx_bytes) },
61 { "rx_errors", E1000_STAT(net_stats.rx_errors) }, 48 { "rx_errors", E1000_STAT(net_stats.rx_errors) },
62 { "tx_errors", E1000_STAT(net_stats.tx_errors) }, 49 { "tx_errors", E1000_STAT(net_stats.tx_errors) },
63 { "rx_dropped", E1000_STAT(net_stats.rx_dropped) },
64 { "tx_dropped", E1000_STAT(net_stats.tx_dropped) }, 50 { "tx_dropped", E1000_STAT(net_stats.tx_dropped) },
65 { "multicast", E1000_STAT(net_stats.multicast) }, 51 { "multicast", E1000_STAT(net_stats.multicast) },
66 { "collisions", E1000_STAT(net_stats.collisions) }, 52 { "collisions", E1000_STAT(net_stats.collisions) },
@@ -68,7 +54,6 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
68 { "rx_over_errors", E1000_STAT(net_stats.rx_over_errors) }, 54 { "rx_over_errors", E1000_STAT(net_stats.rx_over_errors) },
69 { "rx_crc_errors", E1000_STAT(net_stats.rx_crc_errors) }, 55 { "rx_crc_errors", E1000_STAT(net_stats.rx_crc_errors) },
70 { "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) }, 56 { "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) },
71 { "rx_fifo_errors", E1000_STAT(net_stats.rx_fifo_errors) },
72 { "rx_no_buffer_count", E1000_STAT(stats.rnbc) }, 57 { "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
73 { "rx_missed_errors", E1000_STAT(net_stats.rx_missed_errors) }, 58 { "rx_missed_errors", E1000_STAT(net_stats.rx_missed_errors) },
74 { "tx_aborted_errors", E1000_STAT(net_stats.tx_aborted_errors) }, 59 { "tx_aborted_errors", E1000_STAT(net_stats.tx_aborted_errors) },
@@ -97,14 +82,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
97 { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) }, 82 { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
98}; 83};
99 84
100#ifdef CONFIG_E1000_MQ
101#define E1000_QUEUE_STATS_LEN \
102 (((struct e1000_adapter *)netdev->priv)->num_tx_queues + \
103 ((struct e1000_adapter *)netdev->priv)->num_rx_queues) \
104 * (sizeof(struct e1000_queue_stats) / sizeof(uint64_t))
105#else
106#define E1000_QUEUE_STATS_LEN 0 85#define E1000_QUEUE_STATS_LEN 0
107#endif
108#define E1000_GLOBAL_STATS_LEN \ 86#define E1000_GLOBAL_STATS_LEN \
109 sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats) 87 sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats)
110#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN) 88#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN)
@@ -346,6 +324,9 @@ e1000_set_tso(struct net_device *netdev, uint32_t data)
346 netdev->features |= NETIF_F_TSO; 324 netdev->features |= NETIF_F_TSO;
347 else 325 else
348 netdev->features &= ~NETIF_F_TSO; 326 netdev->features &= ~NETIF_F_TSO;
327
328 DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled");
329 adapter->tso_force = TRUE;
349 return 0; 330 return 0;
350} 331}
351#endif /* NETIF_F_TSO */ 332#endif /* NETIF_F_TSO */
@@ -594,6 +575,7 @@ e1000_get_drvinfo(struct net_device *netdev,
594 case e1000_82571: 575 case e1000_82571:
595 case e1000_82572: 576 case e1000_82572:
596 case e1000_82573: 577 case e1000_82573:
578 case e1000_80003es2lan:
597 sprintf(firmware_version, "%d.%d-%d", 579 sprintf(firmware_version, "%d.%d-%d",
598 (eeprom_data & 0xF000) >> 12, 580 (eeprom_data & 0xF000) >> 12,
599 (eeprom_data & 0x0FF0) >> 4, 581 (eeprom_data & 0x0FF0) >> 4,
@@ -642,6 +624,9 @@ e1000_set_ringparam(struct net_device *netdev,
642 struct e1000_rx_ring *rxdr, *rx_old, *rx_new; 624 struct e1000_rx_ring *rxdr, *rx_old, *rx_new;
643 int i, err, tx_ring_size, rx_ring_size; 625 int i, err, tx_ring_size, rx_ring_size;
644 626
627 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
628 return -EINVAL;
629
645 tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues; 630 tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
646 rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues; 631 rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;
647 632
@@ -669,9 +654,6 @@ e1000_set_ringparam(struct net_device *netdev,
669 txdr = adapter->tx_ring; 654 txdr = adapter->tx_ring;
670 rxdr = adapter->rx_ring; 655 rxdr = adapter->rx_ring;
671 656
672 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
673 return -EINVAL;
674
675 rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD); 657 rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD);
676 rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ? 658 rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ?
677 E1000_MAX_RXD : E1000_MAX_82544_RXD)); 659 E1000_MAX_RXD : E1000_MAX_82544_RXD));
@@ -767,6 +749,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
767 /* there are several bits on newer hardware that are r/w */ 749 /* there are several bits on newer hardware that are r/w */
768 case e1000_82571: 750 case e1000_82571:
769 case e1000_82572: 751 case e1000_82572:
752 case e1000_80003es2lan:
770 toggle = 0x7FFFF3FF; 753 toggle = 0x7FFFF3FF;
771 break; 754 break;
772 case e1000_82573: 755 case e1000_82573:
@@ -1256,6 +1239,10 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1256 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x9140); 1239 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x9140);
1257 /* autoneg off */ 1240 /* autoneg off */
1258 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x8140); 1241 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x8140);
1242 } else if (adapter->hw.phy_type == e1000_phy_gg82563) {
1243 e1000_write_phy_reg(&adapter->hw,
1244 GG82563_PHY_KMRN_MODE_CTRL,
1245 0x1CE);
1259 } 1246 }
1260 /* force 1000, set loopback */ 1247 /* force 1000, set loopback */
1261 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x4140); 1248 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x4140);
@@ -1325,6 +1312,7 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter)
1325 case e1000_82571: 1312 case e1000_82571:
1326 case e1000_82572: 1313 case e1000_82572:
1327 case e1000_82573: 1314 case e1000_82573:
1315 case e1000_80003es2lan:
1328 return e1000_integrated_phy_loopback(adapter); 1316 return e1000_integrated_phy_loopback(adapter);
1329 break; 1317 break;
1330 1318
@@ -1405,6 +1393,11 @@ e1000_loopback_cleanup(struct e1000_adapter *adapter)
1405 case e1000_82546_rev_3: 1393 case e1000_82546_rev_3:
1406 default: 1394 default:
1407 hw->autoneg = TRUE; 1395 hw->autoneg = TRUE;
1396 if (hw->phy_type == e1000_phy_gg82563) {
1397 e1000_write_phy_reg(hw,
1398 GG82563_PHY_KMRN_MODE_CTRL,
1399 0x180);
1400 }
1408 e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); 1401 e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
1409 if (phy_reg & MII_CR_LOOPBACK) { 1402 if (phy_reg & MII_CR_LOOPBACK) {
1410 phy_reg &= ~MII_CR_LOOPBACK; 1403 phy_reg &= ~MII_CR_LOOPBACK;
@@ -1640,10 +1633,26 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1640 case E1000_DEV_ID_82546EB_QUAD_COPPER: 1633 case E1000_DEV_ID_82546EB_QUAD_COPPER:
1641 case E1000_DEV_ID_82545EM_FIBER: 1634 case E1000_DEV_ID_82545EM_FIBER:
1642 case E1000_DEV_ID_82545EM_COPPER: 1635 case E1000_DEV_ID_82545EM_COPPER:
1636 case E1000_DEV_ID_82546GB_QUAD_COPPER:
1643 wol->supported = 0; 1637 wol->supported = 0;
1644 wol->wolopts = 0; 1638 wol->wolopts = 0;
1645 return; 1639 return;
1646 1640
1641 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1642 /* device id 10B5 port-A supports wol */
1643 if (!adapter->ksp3_port_a) {
1644 wol->supported = 0;
1645 return;
1646 }
1647 /* KSP3 does not support UCAST wake-ups for any interface */
1648 wol->supported = WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
1649
1650 if (adapter->wol & E1000_WUFC_EX)
1651 DPRINTK(DRV, ERR, "Interface does not support "
1652 "directed (unicast) frame wake-up packets\n");
1653 wol->wolopts = 0;
1654 goto do_defaults;
1655
1647 case E1000_DEV_ID_82546EB_FIBER: 1656 case E1000_DEV_ID_82546EB_FIBER:
1648 case E1000_DEV_ID_82546GB_FIBER: 1657 case E1000_DEV_ID_82546GB_FIBER:
1649 case E1000_DEV_ID_82571EB_FIBER: 1658 case E1000_DEV_ID_82571EB_FIBER:
@@ -1658,8 +1667,9 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1658 default: 1667 default:
1659 wol->supported = WAKE_UCAST | WAKE_MCAST | 1668 wol->supported = WAKE_UCAST | WAKE_MCAST |
1660 WAKE_BCAST | WAKE_MAGIC; 1669 WAKE_BCAST | WAKE_MAGIC;
1661
1662 wol->wolopts = 0; 1670 wol->wolopts = 0;
1671
1672do_defaults:
1663 if (adapter->wol & E1000_WUFC_EX) 1673 if (adapter->wol & E1000_WUFC_EX)
1664 wol->wolopts |= WAKE_UCAST; 1674 wol->wolopts |= WAKE_UCAST;
1665 if (adapter->wol & E1000_WUFC_MC) 1675 if (adapter->wol & E1000_WUFC_MC)
@@ -1684,10 +1694,22 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1684 case E1000_DEV_ID_82543GC_COPPER: 1694 case E1000_DEV_ID_82543GC_COPPER:
1685 case E1000_DEV_ID_82544EI_FIBER: 1695 case E1000_DEV_ID_82544EI_FIBER:
1686 case E1000_DEV_ID_82546EB_QUAD_COPPER: 1696 case E1000_DEV_ID_82546EB_QUAD_COPPER:
1697 case E1000_DEV_ID_82546GB_QUAD_COPPER:
1687 case E1000_DEV_ID_82545EM_FIBER: 1698 case E1000_DEV_ID_82545EM_FIBER:
1688 case E1000_DEV_ID_82545EM_COPPER: 1699 case E1000_DEV_ID_82545EM_COPPER:
1689 return wol->wolopts ? -EOPNOTSUPP : 0; 1700 return wol->wolopts ? -EOPNOTSUPP : 0;
1690 1701
1702 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1703 /* device id 10B5 port-A supports wol */
1704 if (!adapter->ksp3_port_a)
1705 return wol->wolopts ? -EOPNOTSUPP : 0;
1706
1707 if (wol->wolopts & WAKE_UCAST) {
1708 DPRINTK(DRV, ERR, "Interface does not support "
1709 "directed (unicast) frame wake-up packets\n");
1710 return -EOPNOTSUPP;
1711 }
1712
1691 case E1000_DEV_ID_82546EB_FIBER: 1713 case E1000_DEV_ID_82546EB_FIBER:
1692 case E1000_DEV_ID_82546GB_FIBER: 1714 case E1000_DEV_ID_82546GB_FIBER:
1693 case E1000_DEV_ID_82571EB_FIBER: 1715 case E1000_DEV_ID_82571EB_FIBER:
@@ -1799,11 +1821,6 @@ e1000_get_ethtool_stats(struct net_device *netdev,
1799 struct ethtool_stats *stats, uint64_t *data) 1821 struct ethtool_stats *stats, uint64_t *data)
1800{ 1822{
1801 struct e1000_adapter *adapter = netdev_priv(netdev); 1823 struct e1000_adapter *adapter = netdev_priv(netdev);
1802#ifdef CONFIG_E1000_MQ
1803 uint64_t *queue_stat;
1804 int stat_count = sizeof(struct e1000_queue_stats) / sizeof(uint64_t);
1805 int j, k;
1806#endif
1807 int i; 1824 int i;
1808 1825
1809 e1000_update_stats(adapter); 1826 e1000_update_stats(adapter);
@@ -1812,29 +1829,12 @@ e1000_get_ethtool_stats(struct net_device *netdev,
1812 data[i] = (e1000_gstrings_stats[i].sizeof_stat == 1829 data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
1813 sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p; 1830 sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
1814 } 1831 }
1815#ifdef CONFIG_E1000_MQ
1816 for (j = 0; j < adapter->num_tx_queues; j++) {
1817 queue_stat = (uint64_t *)&adapter->tx_ring[j].tx_stats;
1818 for (k = 0; k < stat_count; k++)
1819 data[i + k] = queue_stat[k];
1820 i += k;
1821 }
1822 for (j = 0; j < adapter->num_rx_queues; j++) {
1823 queue_stat = (uint64_t *)&adapter->rx_ring[j].rx_stats;
1824 for (k = 0; k < stat_count; k++)
1825 data[i + k] = queue_stat[k];
1826 i += k;
1827 }
1828#endif
1829/* BUG_ON(i != E1000_STATS_LEN); */ 1832/* BUG_ON(i != E1000_STATS_LEN); */
1830} 1833}
1831 1834
1832static void 1835static void
1833e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data) 1836e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
1834{ 1837{
1835#ifdef CONFIG_E1000_MQ
1836 struct e1000_adapter *adapter = netdev_priv(netdev);
1837#endif
1838 uint8_t *p = data; 1838 uint8_t *p = data;
1839 int i; 1839 int i;
1840 1840
@@ -1849,20 +1849,6 @@ e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
1849 ETH_GSTRING_LEN); 1849 ETH_GSTRING_LEN);
1850 p += ETH_GSTRING_LEN; 1850 p += ETH_GSTRING_LEN;
1851 } 1851 }
1852#ifdef CONFIG_E1000_MQ
1853 for (i = 0; i < adapter->num_tx_queues; i++) {
1854 sprintf(p, "tx_queue_%u_packets", i);
1855 p += ETH_GSTRING_LEN;
1856 sprintf(p, "tx_queue_%u_bytes", i);
1857 p += ETH_GSTRING_LEN;
1858 }
1859 for (i = 0; i < adapter->num_rx_queues; i++) {
1860 sprintf(p, "rx_queue_%u_packets", i);
1861 p += ETH_GSTRING_LEN;
1862 sprintf(p, "rx_queue_%u_bytes", i);
1863 p += ETH_GSTRING_LEN;
1864 }
1865#endif
1866/* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */ 1852/* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
1867 break; 1853 break;
1868 } 1854 }
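The WoL hunks above give the KSP3 quad-port part its own case that masks out unsupported unicast wake-ups and then jumps to the shared do_defaults label, so the common wolopts decoding is written only once. A self-contained sketch of that control flow; the device IDs and the wake-up filter bit are placeholders, not the real e1000_hw.h values:

    #include <linux/ethtool.h>

    #define EXAMPLE_DEV_ID_NO_WOL           0x1010
    #define EXAMPLE_DEV_ID_PORT_A_ONLY      0x10b5
    #define EXAMPLE_WUFC_MAG                0x00000002

    static void example_get_wol(u16 device_id, u32 wufc,
                                struct ethtool_wolinfo *wol)
    {
            switch (device_id) {
            case EXAMPLE_DEV_ID_NO_WOL:
                    wol->supported = 0;
                    wol->wolopts = 0;
                    return;
            case EXAMPLE_DEV_ID_PORT_A_ONLY:
                    /* no directed (unicast) wake-up on this part */
                    wol->supported = WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
                    wol->wolopts = 0;
                    goto do_defaults;
            default:
                    wol->supported = WAKE_UCAST | WAKE_MCAST |
                                     WAKE_BCAST | WAKE_MAGIC;
                    wol->wolopts = 0;
    do_defaults:
                    if (wufc & EXAMPLE_WUFC_MAG)
                            wol->wolopts |= WAKE_MAGIC;
                    return;
            }
    }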
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index beeec0fbbeac..523c2c9fc0ac 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -100,6 +100,8 @@ static void e1000_write_reg_io(struct e1000_hw *hw, uint32_t offset,
100 100
101#define E1000_WRITE_REG_IO(a, reg, val) \ 101#define E1000_WRITE_REG_IO(a, reg, val) \
102 e1000_write_reg_io((a), E1000_##reg, val) 102 e1000_write_reg_io((a), E1000_##reg, val)
103static int32_t e1000_configure_kmrn_for_10_100(struct e1000_hw *hw);
104static int32_t e1000_configure_kmrn_for_1000(struct e1000_hw *hw);
103 105
104/* IGP cable length table */ 106/* IGP cable length table */
105static const 107static const
@@ -153,6 +155,11 @@ e1000_set_phy_type(struct e1000_hw *hw)
153 hw->phy_type = e1000_phy_igp; 155 hw->phy_type = e1000_phy_igp;
154 break; 156 break;
155 } 157 }
158 case GG82563_E_PHY_ID:
159 if (hw->mac_type == e1000_80003es2lan) {
160 hw->phy_type = e1000_phy_gg82563;
161 break;
162 }
156 /* Fall Through */ 163 /* Fall Through */
157 default: 164 default:
158 /* Should never have loaded on this device */ 165 /* Should never have loaded on this device */
@@ -353,12 +360,19 @@ e1000_set_mac_type(struct e1000_hw *hw)
353 case E1000_DEV_ID_82573L: 360 case E1000_DEV_ID_82573L:
354 hw->mac_type = e1000_82573; 361 hw->mac_type = e1000_82573;
355 break; 362 break;
363 case E1000_DEV_ID_80003ES2LAN_COPPER_DPT:
364 case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
365 hw->mac_type = e1000_80003es2lan;
366 break;
356 default: 367 default:
357 /* Should never have loaded on this device */ 368 /* Should never have loaded on this device */
358 return -E1000_ERR_MAC_TYPE; 369 return -E1000_ERR_MAC_TYPE;
359 } 370 }
360 371
361 switch(hw->mac_type) { 372 switch(hw->mac_type) {
373 case e1000_80003es2lan:
374 hw->swfw_sync_present = TRUE;
375 /* fall through */
362 case e1000_82571: 376 case e1000_82571:
363 case e1000_82572: 377 case e1000_82572:
364 case e1000_82573: 378 case e1000_82573:
@@ -399,6 +413,7 @@ e1000_set_media_type(struct e1000_hw *hw)
399 case E1000_DEV_ID_82546GB_SERDES: 413 case E1000_DEV_ID_82546GB_SERDES:
400 case E1000_DEV_ID_82571EB_SERDES: 414 case E1000_DEV_ID_82571EB_SERDES:
401 case E1000_DEV_ID_82572EI_SERDES: 415 case E1000_DEV_ID_82572EI_SERDES:
416 case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
402 hw->media_type = e1000_media_type_internal_serdes; 417 hw->media_type = e1000_media_type_internal_serdes;
403 break; 418 break;
404 default: 419 default:
@@ -575,6 +590,7 @@ e1000_reset_hw(struct e1000_hw *hw)
575 /* fall through */ 590 /* fall through */
576 case e1000_82571: 591 case e1000_82571:
577 case e1000_82572: 592 case e1000_82572:
593 case e1000_80003es2lan:
578 ret_val = e1000_get_auto_rd_done(hw); 594 ret_val = e1000_get_auto_rd_done(hw);
579 if(ret_val) 595 if(ret_val)
580 /* We don't want to continue accessing MAC registers. */ 596 /* We don't want to continue accessing MAC registers. */
@@ -641,6 +657,7 @@ e1000_init_hw(struct e1000_hw *hw)
641 uint16_t cmd_mmrbc; 657 uint16_t cmd_mmrbc;
642 uint16_t stat_mmrbc; 658 uint16_t stat_mmrbc;
643 uint32_t mta_size; 659 uint32_t mta_size;
660 uint32_t reg_data;
644 uint32_t ctrl_ext; 661 uint32_t ctrl_ext;
645 662
646 DEBUGFUNC("e1000_init_hw"); 663 DEBUGFUNC("e1000_init_hw");
@@ -739,6 +756,7 @@ e1000_init_hw(struct e1000_hw *hw)
739 case e1000_82571: 756 case e1000_82571:
740 case e1000_82572: 757 case e1000_82572:
741 case e1000_82573: 758 case e1000_82573:
759 case e1000_80003es2lan:
742 ctrl |= E1000_TXDCTL_COUNT_DESC; 760 ctrl |= E1000_TXDCTL_COUNT_DESC;
743 break; 761 break;
744 } 762 }
@@ -752,12 +770,34 @@ e1000_init_hw(struct e1000_hw *hw)
752 switch (hw->mac_type) { 770 switch (hw->mac_type) {
753 default: 771 default:
754 break; 772 break;
773 case e1000_80003es2lan:
774 /* Enable retransmit on late collisions */
775 reg_data = E1000_READ_REG(hw, TCTL);
776 reg_data |= E1000_TCTL_RTLC;
777 E1000_WRITE_REG(hw, TCTL, reg_data);
778
779 /* Configure Gigabit Carry Extend Padding */
780 reg_data = E1000_READ_REG(hw, TCTL_EXT);
781 reg_data &= ~E1000_TCTL_EXT_GCEX_MASK;
782 reg_data |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
783 E1000_WRITE_REG(hw, TCTL_EXT, reg_data);
784
785 /* Configure Transmit Inter-Packet Gap */
786 reg_data = E1000_READ_REG(hw, TIPG);
787 reg_data &= ~E1000_TIPG_IPGT_MASK;
788 reg_data |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000;
789 E1000_WRITE_REG(hw, TIPG, reg_data);
790
791 reg_data = E1000_READ_REG_ARRAY(hw, FFLT, 0x0001);
792 reg_data &= ~0x00100000;
793 E1000_WRITE_REG_ARRAY(hw, FFLT, 0x0001, reg_data);
794 /* Fall through */
755 case e1000_82571: 795 case e1000_82571:
756 case e1000_82572: 796 case e1000_82572:
757 ctrl = E1000_READ_REG(hw, TXDCTL1); 797 ctrl = E1000_READ_REG(hw, TXDCTL1);
758 ctrl &= ~E1000_TXDCTL_WTHRESH; 798 ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB;
759 ctrl |= E1000_TXDCTL_COUNT_DESC | E1000_TXDCTL_FULL_TX_DESC_WB; 799 if(hw->mac_type >= e1000_82571)
760 ctrl |= (1 << 22); 800 ctrl |= E1000_TXDCTL_COUNT_DESC;
761 E1000_WRITE_REG(hw, TXDCTL1, ctrl); 801 E1000_WRITE_REG(hw, TXDCTL1, ctrl);
762 break; 802 break;
763 } 803 }
@@ -906,7 +946,13 @@ e1000_setup_link(struct e1000_hw *hw)
906 * signal detection. So this should be done before e1000_setup_pcs_link() 946 * signal detection. So this should be done before e1000_setup_pcs_link()
907 * or e1000_phy_setup() is called. 947 * or e1000_phy_setup() is called.
908 */ 948 */
909 if(hw->mac_type == e1000_82543) { 949 if (hw->mac_type == e1000_82543) {
950 ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
951 1, &eeprom_data);
952 if (ret_val) {
953 DEBUGOUT("EEPROM Read Error\n");
954 return -E1000_ERR_EEPROM;
955 }
910 ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) << 956 ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) <<
911 SWDPIO__EXT_SHIFT); 957 SWDPIO__EXT_SHIFT);
912 E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); 958 E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
@@ -1308,6 +1354,154 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw)
1308 return E1000_SUCCESS; 1354 return E1000_SUCCESS;
1309} 1355}
1310 1356
1357/********************************************************************
1358* Copper link setup for e1000_phy_gg82563 series.
1359*
1360* hw - Struct containing variables accessed by shared code
1361*********************************************************************/
1362static int32_t
1363e1000_copper_link_ggp_setup(struct e1000_hw *hw)
1364{
1365 int32_t ret_val;
1366 uint16_t phy_data;
1367 uint32_t reg_data;
1368
1369 DEBUGFUNC("e1000_copper_link_ggp_setup");
1370
1371 if(!hw->phy_reset_disable) {
1372
1373 /* Enable CRS on TX for half-duplex operation. */
1374 ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
1375 &phy_data);
1376 if(ret_val)
1377 return ret_val;
1378
1379 phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
1380 /* Use 25MHz for both link down and 1000BASE-T for Tx clock */
1381 phy_data |= GG82563_MSCR_TX_CLK_1000MBPS_25MHZ;
1382
1383 ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
1384 phy_data);
1385 if(ret_val)
1386 return ret_val;
1387
1388 /* Options:
1389 * MDI/MDI-X = 0 (default)
1390 * 0 - Auto for all speeds
1391 * 1 - MDI mode
1392 * 2 - MDI-X mode
1393 * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
1394 */
1395 ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL, &phy_data);
1396 if(ret_val)
1397 return ret_val;
1398
1399 phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK;
1400
1401 switch (hw->mdix) {
1402 case 1:
1403 phy_data |= GG82563_PSCR_CROSSOVER_MODE_MDI;
1404 break;
1405 case 2:
1406 phy_data |= GG82563_PSCR_CROSSOVER_MODE_MDIX;
1407 break;
1408 case 0:
1409 default:
1410 phy_data |= GG82563_PSCR_CROSSOVER_MODE_AUTO;
1411 break;
1412 }
1413
1414 /* Options:
1415 * disable_polarity_correction = 0 (default)
1416 * Automatic Correction for Reversed Cable Polarity
1417 * 0 - Disabled
1418 * 1 - Enabled
1419 */
1420 phy_data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
1421 if(hw->disable_polarity_correction == 1)
1422 phy_data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
1423 ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL, phy_data);
1424
1425 if(ret_val)
1426 return ret_val;
1427
1428 /* SW Reset the PHY so all changes take effect */
1429 ret_val = e1000_phy_reset(hw);
1430 if (ret_val) {
1431 DEBUGOUT("Error Resetting the PHY\n");
1432 return ret_val;
1433 }
1434 } /* phy_reset_disable */
1435
1436 if (hw->mac_type == e1000_80003es2lan) {
1437 /* Bypass RX and TX FIFO's */
1438 ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL,
1439 E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
1440 E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
1441 if (ret_val)
1442 return ret_val;
1443
1444 ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL_2, &phy_data);
1445 if (ret_val)
1446 return ret_val;
1447
1448 phy_data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG;
1449 ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL_2, phy_data);
1450
1451 if (ret_val)
1452 return ret_val;
1453
1454 reg_data = E1000_READ_REG(hw, CTRL_EXT);
1455 reg_data &= ~(E1000_CTRL_EXT_LINK_MODE_MASK);
1456 E1000_WRITE_REG(hw, CTRL_EXT, reg_data);
1457
1458 ret_val = e1000_read_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL,
1459 &phy_data);
1460 if (ret_val)
1461 return ret_val;
1462
1463 /* Do not init these registers when the HW is in IAMT mode, since the
1464 * firmware will have already initialized them. We only initialize
1465 * them if the HW is not in IAMT mode.
1466 */
1467 if (e1000_check_mng_mode(hw) == FALSE) {
1468 /* Enable Electrical Idle on the PHY */
1469 phy_data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE;
1470 ret_val = e1000_write_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL,
1471 phy_data);
1472 if (ret_val)
1473 return ret_val;
1474
1475 ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
1476 &phy_data);
1477 if (ret_val)
1478 return ret_val;
1479
1480 /* Enable Pass False Carrier on the PHY */
1481 phy_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
1482
1483 ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
1484 phy_data);
1485 if (ret_val)
1486 return ret_val;
1487 }
1488
1489 /* Workaround: Disable padding in Kumeran interface in the MAC
1490 * and in the PHY to avoid CRC errors.
1491 */
1492 ret_val = e1000_read_phy_reg(hw, GG82563_PHY_INBAND_CTRL,
1493 &phy_data);
1494 if (ret_val)
1495 return ret_val;
1496 phy_data |= GG82563_ICR_DIS_PADDING;
1497 ret_val = e1000_write_phy_reg(hw, GG82563_PHY_INBAND_CTRL,
1498 phy_data);
1499 if (ret_val)
1500 return ret_val;
1501 }
1502
1503 return E1000_SUCCESS;
1504}
1311 1505
1312/******************************************************************** 1506/********************************************************************
1313* Copper link setup for e1000_phy_m88 series. 1507* Copper link setup for e1000_phy_m88 series.
@@ -1518,6 +1712,7 @@ e1000_setup_copper_link(struct e1000_hw *hw)
1518 int32_t ret_val; 1712 int32_t ret_val;
1519 uint16_t i; 1713 uint16_t i;
1520 uint16_t phy_data; 1714 uint16_t phy_data;
1715 uint16_t reg_data;
1521 1716
1522 DEBUGFUNC("e1000_setup_copper_link"); 1717 DEBUGFUNC("e1000_setup_copper_link");
1523 1718
@@ -1526,6 +1721,22 @@ e1000_setup_copper_link(struct e1000_hw *hw)
1526 if(ret_val) 1721 if(ret_val)
1527 return ret_val; 1722 return ret_val;
1528 1723
1724 switch (hw->mac_type) {
1725 case e1000_80003es2lan:
1726 ret_val = e1000_read_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_INB_CTRL,
1727 &reg_data);
1728 if (ret_val)
1729 return ret_val;
1730 reg_data |= E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING;
1731 ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_INB_CTRL,
1732 reg_data);
1733 if (ret_val)
1734 return ret_val;
1735 break;
1736 default:
1737 break;
1738 }
1739
1529 if (hw->phy_type == e1000_phy_igp || 1740 if (hw->phy_type == e1000_phy_igp ||
1530 hw->phy_type == e1000_phy_igp_2) { 1741 hw->phy_type == e1000_phy_igp_2) {
1531 ret_val = e1000_copper_link_igp_setup(hw); 1742 ret_val = e1000_copper_link_igp_setup(hw);
@@ -1535,6 +1746,10 @@ e1000_setup_copper_link(struct e1000_hw *hw)
1535 ret_val = e1000_copper_link_mgp_setup(hw); 1746 ret_val = e1000_copper_link_mgp_setup(hw);
1536 if(ret_val) 1747 if(ret_val)
1537 return ret_val; 1748 return ret_val;
1749 } else if (hw->phy_type == e1000_phy_gg82563) {
1750 ret_val = e1000_copper_link_ggp_setup(hw);
1751 if(ret_val)
1752 return ret_val;
1538 } 1753 }
1539 1754
1540 if(hw->autoneg) { 1755 if(hw->autoneg) {
@@ -1582,6 +1797,59 @@ e1000_setup_copper_link(struct e1000_hw *hw)
1582} 1797}
1583 1798
1584/****************************************************************************** 1799/******************************************************************************
1800* Configure the MAC-to-PHY interface for 10/100Mbps
1801*
1802* hw - Struct containing variables accessed by shared code
1803******************************************************************************/
1804static int32_t
1805e1000_configure_kmrn_for_10_100(struct e1000_hw *hw)
1806{
1807 int32_t ret_val = E1000_SUCCESS;
1808 uint32_t tipg;
1809 uint16_t reg_data;
1810
1811 DEBUGFUNC("e1000_configure_kmrn_for_10_100");
1812
1813 reg_data = E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT;
1814 ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_HD_CTRL,
1815 reg_data);
1816 if (ret_val)
1817 return ret_val;
1818
1819 /* Configure Transmit Inter-Packet Gap */
1820 tipg = E1000_READ_REG(hw, TIPG);
1821 tipg &= ~E1000_TIPG_IPGT_MASK;
1822 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_10_100;
1823 E1000_WRITE_REG(hw, TIPG, tipg);
1824
1825 return ret_val;
1826}
1827
1828static int32_t
1829e1000_configure_kmrn_for_1000(struct e1000_hw *hw)
1830{
1831 int32_t ret_val = E1000_SUCCESS;
1832 uint16_t reg_data;
1833 uint32_t tipg;
1834
1835 DEBUGFUNC("e1000_configure_kmrn_for_1000");
1836
1837 reg_data = E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT;
1838 ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_HD_CTRL,
1839 reg_data);
1840 if (ret_val)
1841 return ret_val;
1842
1843 /* Configure Transmit Inter-Packet Gap */
1844 tipg = E1000_READ_REG(hw, TIPG);
1845 tipg &= ~E1000_TIPG_IPGT_MASK;
1846 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000;
1847 E1000_WRITE_REG(hw, TIPG, tipg);
1848
1849 return ret_val;
1850}
1851
1852/******************************************************************************
1585* Configures PHY autoneg and flow control advertisement settings 1853* Configures PHY autoneg and flow control advertisement settings
1586* 1854*
1587* hw - Struct containing variables accessed by shared code 1855* hw - Struct containing variables accessed by shared code
@@ -1802,7 +2070,8 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
1802 /* Write the configured values back to the Device Control Reg. */ 2070 /* Write the configured values back to the Device Control Reg. */
1803 E1000_WRITE_REG(hw, CTRL, ctrl); 2071 E1000_WRITE_REG(hw, CTRL, ctrl);
1804 2072
1805 if (hw->phy_type == e1000_phy_m88) { 2073 if ((hw->phy_type == e1000_phy_m88) ||
2074 (hw->phy_type == e1000_phy_gg82563)) {
1806 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); 2075 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
1807 if(ret_val) 2076 if(ret_val)
1808 return ret_val; 2077 return ret_val;
@@ -1871,7 +2140,8 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
1871 msec_delay(100); 2140 msec_delay(100);
1872 } 2141 }
1873 if((i == 0) && 2142 if((i == 0) &&
1874 (hw->phy_type == e1000_phy_m88)) { 2143 ((hw->phy_type == e1000_phy_m88) ||
2144 (hw->phy_type == e1000_phy_gg82563))) {
1875 /* We didn't get link. Reset the DSP and wait again for link. */ 2145 /* We didn't get link. Reset the DSP and wait again for link. */
1876 ret_val = e1000_phy_reset_dsp(hw); 2146 ret_val = e1000_phy_reset_dsp(hw);
1877 if(ret_val) { 2147 if(ret_val) {
@@ -1930,6 +2200,27 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
1930 if(ret_val) 2200 if(ret_val)
1931 return ret_val; 2201 return ret_val;
1932 } 2202 }
2203 } else if (hw->phy_type == e1000_phy_gg82563) {
2204 /* The TX_CLK of the Extended PHY Specific Control Register defaults
 2205                  * to 2.5MHz on a reset. We need to re-force it back to 25MHz
 2206                  * unless we are forcing a 10 Mb/s link. */
2207 ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data);
2208 if (ret_val)
2209 return ret_val;
2210
2211 phy_data &= ~GG82563_MSCR_TX_CLK_MASK;
2212 if ((hw->forced_speed_duplex == e1000_10_full) ||
2213 (hw->forced_speed_duplex == e1000_10_half))
2214 phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5MHZ;
2215 else
2216 phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25MHZ;
2217
2218 /* Also due to the reset, we need to enable CRS on Tx. */
2219 phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
2220
2221 ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data);
2222 if (ret_val)
2223 return ret_val;
1933 } 2224 }
1934 return E1000_SUCCESS; 2225 return E1000_SUCCESS;
1935} 2226}
@@ -2592,6 +2883,16 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw,
2592 } 2883 }
2593 } 2884 }
2594 2885
2886 if ((hw->mac_type == e1000_80003es2lan) &&
2887 (hw->media_type == e1000_media_type_copper)) {
2888 if (*speed == SPEED_1000)
2889 ret_val = e1000_configure_kmrn_for_1000(hw);
2890 else
2891 ret_val = e1000_configure_kmrn_for_10_100(hw);
2892 if (ret_val)
2893 return ret_val;
2894 }
2895
2595 return E1000_SUCCESS; 2896 return E1000_SUCCESS;
2596} 2897}
2597 2898
@@ -2767,6 +3068,72 @@ e1000_shift_in_mdi_bits(struct e1000_hw *hw)
2767 return data; 3068 return data;
2768} 3069}
2769 3070
3071int32_t
3072e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask)
3073{
3074 uint32_t swfw_sync = 0;
3075 uint32_t swmask = mask;
3076 uint32_t fwmask = mask << 16;
3077 int32_t timeout = 200;
3078
3079 DEBUGFUNC("e1000_swfw_sync_acquire");
3080
3081 if (!hw->swfw_sync_present)
3082 return e1000_get_hw_eeprom_semaphore(hw);
3083
3084 while(timeout) {
3085 if (e1000_get_hw_eeprom_semaphore(hw))
3086 return -E1000_ERR_SWFW_SYNC;
3087
3088 swfw_sync = E1000_READ_REG(hw, SW_FW_SYNC);
3089 if (!(swfw_sync & (fwmask | swmask))) {
3090 break;
3091 }
3092
3093 /* firmware currently using resource (fwmask) */
3094 /* or other software thread currently using resource (swmask) */
3095 e1000_put_hw_eeprom_semaphore(hw);
3096 msec_delay_irq(5);
3097 timeout--;
3098 }
3099
3100 if (!timeout) {
3101 DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
3102 return -E1000_ERR_SWFW_SYNC;
3103 }
3104
3105 swfw_sync |= swmask;
3106 E1000_WRITE_REG(hw, SW_FW_SYNC, swfw_sync);
3107
3108 e1000_put_hw_eeprom_semaphore(hw);
3109 return E1000_SUCCESS;
3110}
3111
3112void
3113e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask)
3114{
3115 uint32_t swfw_sync;
3116 uint32_t swmask = mask;
3117
3118 DEBUGFUNC("e1000_swfw_sync_release");
3119
3120 if (!hw->swfw_sync_present) {
3121 e1000_put_hw_eeprom_semaphore(hw);
3122 return;
3123 }
3124
3125 /* if (e1000_get_hw_eeprom_semaphore(hw))
3126 * return -E1000_ERR_SWFW_SYNC; */
3127 while (e1000_get_hw_eeprom_semaphore(hw) != E1000_SUCCESS);
3128 /* empty */
3129
3130 swfw_sync = E1000_READ_REG(hw, SW_FW_SYNC);
3131 swfw_sync &= ~swmask;
3132 E1000_WRITE_REG(hw, SW_FW_SYNC, swfw_sync);
3133
3134 e1000_put_hw_eeprom_semaphore(hw);
3135}
3136
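A minimal sketch (illustration only, function name hypothetical, assuming the E1000_SWFW_* masks added to e1000_hw.h by this patch) of the locking discipline the two helpers above establish; e1000_read_phy_reg() below follows exactly this pattern:

static int32_t
example_locked_phy_read(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *data)
{
	int32_t ret_val;
	uint16_t swfw;

	/* Each port owns its own PHY semaphore bit. */
	if ((hw->mac_type == e1000_80003es2lan) &&
	    (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1))
		swfw = E1000_SWFW_PHY1_SM;
	else
		swfw = E1000_SWFW_PHY0_SM;

	if (e1000_swfw_sync_acquire(hw, swfw))
		return -E1000_ERR_SWFW_SYNC;

	ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, data);

	e1000_swfw_sync_release(hw, swfw);
	return ret_val;
}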
2770/***************************************************************************** 3137/*****************************************************************************
2771* Reads the value from a PHY register, if the value is on a specific non zero 3138* Reads the value from a PHY register, if the value is on a specific non zero
2772* page, sets the page first. 3139* page, sets the page first.
@@ -2779,22 +3146,55 @@ e1000_read_phy_reg(struct e1000_hw *hw,
2779 uint16_t *phy_data) 3146 uint16_t *phy_data)
2780{ 3147{
2781 uint32_t ret_val; 3148 uint32_t ret_val;
3149 uint16_t swfw;
2782 3150
2783 DEBUGFUNC("e1000_read_phy_reg"); 3151 DEBUGFUNC("e1000_read_phy_reg");
2784 3152
3153 if ((hw->mac_type == e1000_80003es2lan) &&
3154 (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) {
3155 swfw = E1000_SWFW_PHY1_SM;
3156 } else {
3157 swfw = E1000_SWFW_PHY0_SM;
3158 }
3159 if (e1000_swfw_sync_acquire(hw, swfw))
3160 return -E1000_ERR_SWFW_SYNC;
3161
2785 if((hw->phy_type == e1000_phy_igp || 3162 if((hw->phy_type == e1000_phy_igp ||
2786 hw->phy_type == e1000_phy_igp_2) && 3163 hw->phy_type == e1000_phy_igp_2) &&
2787 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { 3164 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
2788 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, 3165 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
2789 (uint16_t)reg_addr); 3166 (uint16_t)reg_addr);
2790 if(ret_val) { 3167 if(ret_val) {
3168 e1000_swfw_sync_release(hw, swfw);
2791 return ret_val; 3169 return ret_val;
2792 } 3170 }
3171 } else if (hw->phy_type == e1000_phy_gg82563) {
3172 if (((reg_addr & MAX_PHY_REG_ADDRESS) > MAX_PHY_MULTI_PAGE_REG) ||
3173 (hw->mac_type == e1000_80003es2lan)) {
3174 /* Select Configuration Page */
3175 if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
3176 ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT,
3177 (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT));
3178 } else {
3179 /* Use Alternative Page Select register to access
3180 * registers 30 and 31
3181 */
3182 ret_val = e1000_write_phy_reg_ex(hw,
3183 GG82563_PHY_PAGE_SELECT_ALT,
3184 (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT));
3185 }
3186
3187 if (ret_val) {
3188 e1000_swfw_sync_release(hw, swfw);
3189 return ret_val;
3190 }
3191 }
2793 } 3192 }
2794 3193
2795 ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, 3194 ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
2796 phy_data); 3195 phy_data);
2797 3196
3197 e1000_swfw_sync_release(hw, swfw);
2798 return ret_val; 3198 return ret_val;
2799} 3199}
2800 3200
@@ -2885,22 +3285,55 @@ e1000_write_phy_reg(struct e1000_hw *hw,
2885 uint16_t phy_data) 3285 uint16_t phy_data)
2886{ 3286{
2887 uint32_t ret_val; 3287 uint32_t ret_val;
3288 uint16_t swfw;
2888 3289
2889 DEBUGFUNC("e1000_write_phy_reg"); 3290 DEBUGFUNC("e1000_write_phy_reg");
2890 3291
3292 if ((hw->mac_type == e1000_80003es2lan) &&
3293 (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) {
3294 swfw = E1000_SWFW_PHY1_SM;
3295 } else {
3296 swfw = E1000_SWFW_PHY0_SM;
3297 }
3298 if (e1000_swfw_sync_acquire(hw, swfw))
3299 return -E1000_ERR_SWFW_SYNC;
3300
2891 if((hw->phy_type == e1000_phy_igp || 3301 if((hw->phy_type == e1000_phy_igp ||
2892 hw->phy_type == e1000_phy_igp_2) && 3302 hw->phy_type == e1000_phy_igp_2) &&
2893 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { 3303 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
2894 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, 3304 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
2895 (uint16_t)reg_addr); 3305 (uint16_t)reg_addr);
2896 if(ret_val) { 3306 if(ret_val) {
3307 e1000_swfw_sync_release(hw, swfw);
2897 return ret_val; 3308 return ret_val;
2898 } 3309 }
3310 } else if (hw->phy_type == e1000_phy_gg82563) {
3311 if (((reg_addr & MAX_PHY_REG_ADDRESS) > MAX_PHY_MULTI_PAGE_REG) ||
3312 (hw->mac_type == e1000_80003es2lan)) {
3313 /* Select Configuration Page */
3314 if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
3315 ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT,
3316 (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT));
3317 } else {
3318 /* Use Alternative Page Select register to access
3319 * registers 30 and 31
3320 */
3321 ret_val = e1000_write_phy_reg_ex(hw,
3322 GG82563_PHY_PAGE_SELECT_ALT,
3323 (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT));
3324 }
3325
3326 if (ret_val) {
3327 e1000_swfw_sync_release(hw, swfw);
3328 return ret_val;
3329 }
3330 }
2899 } 3331 }
2900 3332
2901 ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, 3333 ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
2902 phy_data); 3334 phy_data);
2903 3335
3336 e1000_swfw_sync_release(hw, swfw);
2904 return ret_val; 3337 return ret_val;
2905} 3338}
2906 3339
@@ -2967,6 +3400,65 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw,
2967 return E1000_SUCCESS; 3400 return E1000_SUCCESS;
2968} 3401}
2969 3402
3403int32_t
3404e1000_read_kmrn_reg(struct e1000_hw *hw,
3405 uint32_t reg_addr,
3406 uint16_t *data)
3407{
3408 uint32_t reg_val;
3409 uint16_t swfw;
3410 DEBUGFUNC("e1000_read_kmrn_reg");
3411
3412 if ((hw->mac_type == e1000_80003es2lan) &&
3413 (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) {
3414 swfw = E1000_SWFW_PHY1_SM;
3415 } else {
3416 swfw = E1000_SWFW_PHY0_SM;
3417 }
3418 if (e1000_swfw_sync_acquire(hw, swfw))
3419 return -E1000_ERR_SWFW_SYNC;
3420
3421 /* Write register address */
3422 reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) &
3423 E1000_KUMCTRLSTA_OFFSET) |
3424 E1000_KUMCTRLSTA_REN;
3425 E1000_WRITE_REG(hw, KUMCTRLSTA, reg_val);
3426 udelay(2);
3427
3428 /* Read the data returned */
3429 reg_val = E1000_READ_REG(hw, KUMCTRLSTA);
3430 *data = (uint16_t)reg_val;
3431
3432 e1000_swfw_sync_release(hw, swfw);
3433 return E1000_SUCCESS;
3434}
3435
3436int32_t
3437e1000_write_kmrn_reg(struct e1000_hw *hw,
3438 uint32_t reg_addr,
3439 uint16_t data)
3440{
3441 uint32_t reg_val;
3442 uint16_t swfw;
3443 DEBUGFUNC("e1000_write_kmrn_reg");
3444
3445 if ((hw->mac_type == e1000_80003es2lan) &&
3446 (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) {
3447 swfw = E1000_SWFW_PHY1_SM;
3448 } else {
3449 swfw = E1000_SWFW_PHY0_SM;
3450 }
3451 if (e1000_swfw_sync_acquire(hw, swfw))
3452 return -E1000_ERR_SWFW_SYNC;
3453
3454 reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) &
3455 E1000_KUMCTRLSTA_OFFSET) | data;
3456 E1000_WRITE_REG(hw, KUMCTRLSTA, reg_val);
3457 udelay(2);
3458
3459 e1000_swfw_sync_release(hw, swfw);
3460 return E1000_SUCCESS;
3461}
2970 3462
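A worked example (not part of the patch, helper name hypothetical) of how a Kumeran access is packed into the single KUMCTRLSTA register, using the field definitions this patch adds to e1000_hw.h: bits 20:16 select the Kumeran register, bit 21 flags a read, and the low 16 bits carry the data.

static uint32_t
example_kmrn_read_command(uint32_t kmrn_offset)
{
	/* e.g. example_kmrn_read_command(E1000_KUMCTRLSTA_OFFSET_HD_CTRL)
	 * yields 0x00300000: offset 0x10 in bits 20:16 plus the REN bit. */
	return ((kmrn_offset << E1000_KUMCTRLSTA_OFFSET_SHIFT) &
	        E1000_KUMCTRLSTA_OFFSET) | E1000_KUMCTRLSTA_REN;
}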
2971/****************************************************************************** 3463/******************************************************************************
2972* Returns the PHY to the power-on reset state 3464* Returns the PHY to the power-on reset state
@@ -2979,6 +3471,7 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
2979 uint32_t ctrl, ctrl_ext; 3471 uint32_t ctrl, ctrl_ext;
2980 uint32_t led_ctrl; 3472 uint32_t led_ctrl;
2981 int32_t ret_val; 3473 int32_t ret_val;
3474 uint16_t swfw;
2982 3475
2983 DEBUGFUNC("e1000_phy_hw_reset"); 3476 DEBUGFUNC("e1000_phy_hw_reset");
2984 3477
@@ -2991,11 +3484,21 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
2991 DEBUGOUT("Resetting Phy...\n"); 3484 DEBUGOUT("Resetting Phy...\n");
2992 3485
2993 if(hw->mac_type > e1000_82543) { 3486 if(hw->mac_type > e1000_82543) {
3487 if ((hw->mac_type == e1000_80003es2lan) &&
3488 (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) {
3489 swfw = E1000_SWFW_PHY1_SM;
3490 } else {
3491 swfw = E1000_SWFW_PHY0_SM;
3492 }
3493 if (e1000_swfw_sync_acquire(hw, swfw)) {
3494 e1000_release_software_semaphore(hw);
3495 return -E1000_ERR_SWFW_SYNC;
3496 }
2994 /* Read the device control register and assert the E1000_CTRL_PHY_RST 3497 /* Read the device control register and assert the E1000_CTRL_PHY_RST
2995 * bit. Then, take it out of reset. 3498 * bit. Then, take it out of reset.
2996 * For pre-e1000_82571 hardware, we delay for 10ms between the assert 3499 * For pre-e1000_82571 hardware, we delay for 10ms between the assert
2997 * and deassert. For e1000_82571 hardware and later, we instead delay 3500 * and deassert. For e1000_82571 hardware and later, we instead delay
2998 * for 10ms after the deassertion. 3501 * for 50us between and 10ms after the deassertion.
2999 */ 3502 */
3000 ctrl = E1000_READ_REG(hw, CTRL); 3503 ctrl = E1000_READ_REG(hw, CTRL);
3001 E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PHY_RST); 3504 E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PHY_RST);
@@ -3011,6 +3514,7 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
3011 3514
3012 if (hw->mac_type >= e1000_82571) 3515 if (hw->mac_type >= e1000_82571)
3013 msec_delay(10); 3516 msec_delay(10);
3517 e1000_swfw_sync_release(hw, swfw);
3014 } else { 3518 } else {
3015 /* Read the Extended Device Control Register, assert the PHY_RESET_DIR 3519 /* Read the Extended Device Control Register, assert the PHY_RESET_DIR
3016 * bit to put the PHY into reset. Then, take it out of reset. 3520 * bit to put the PHY into reset. Then, take it out of reset.
@@ -3037,6 +3541,7 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
3037 3541
3038 /* Wait for FW to finish PHY configuration. */ 3542 /* Wait for FW to finish PHY configuration. */
3039 ret_val = e1000_get_phy_cfg_done(hw); 3543 ret_val = e1000_get_phy_cfg_done(hw);
3544 e1000_release_software_semaphore(hw);
3040 3545
3041 return ret_val; 3546 return ret_val;
3042} 3547}
@@ -3114,6 +3619,15 @@ e1000_detect_gig_phy(struct e1000_hw *hw)
3114 return E1000_SUCCESS; 3619 return E1000_SUCCESS;
3115 } 3620 }
3116 3621
3622 /* ESB-2 PHY reads require e1000_phy_gg82563 to be set because of a work-
3623 * around that forces PHY page 0 to be set or the reads fail. The rest of
3624 * the code in this routine uses e1000_read_phy_reg to read the PHY ID.
3625 * So for ESB-2 we need to have this set so our reads won't fail. If the
 3626  * attached PHY is not an e1000_phy_gg82563, the routines below will figure
3627 * this out as well. */
3628 if (hw->mac_type == e1000_80003es2lan)
3629 hw->phy_type = e1000_phy_gg82563;
3630
3117 /* Read the PHY ID Registers to identify which PHY is onboard. */ 3631 /* Read the PHY ID Registers to identify which PHY is onboard. */
3118 ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high); 3632 ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high);
3119 if(ret_val) 3633 if(ret_val)
@@ -3151,6 +3665,9 @@ e1000_detect_gig_phy(struct e1000_hw *hw)
3151 case e1000_82573: 3665 case e1000_82573:
3152 if(hw->phy_id == M88E1111_I_PHY_ID) match = TRUE; 3666 if(hw->phy_id == M88E1111_I_PHY_ID) match = TRUE;
3153 break; 3667 break;
3668 case e1000_80003es2lan:
3669 if (hw->phy_id == GG82563_E_PHY_ID) match = TRUE;
3670 break;
3154 default: 3671 default:
3155 DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type); 3672 DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type);
3156 return -E1000_ERR_CONFIG; 3673 return -E1000_ERR_CONFIG;
@@ -3177,8 +3694,10 @@ e1000_phy_reset_dsp(struct e1000_hw *hw)
3177 DEBUGFUNC("e1000_phy_reset_dsp"); 3694 DEBUGFUNC("e1000_phy_reset_dsp");
3178 3695
3179 do { 3696 do {
3180 ret_val = e1000_write_phy_reg(hw, 29, 0x001d); 3697 if (hw->phy_type != e1000_phy_gg82563) {
3181 if(ret_val) break; 3698 ret_val = e1000_write_phy_reg(hw, 29, 0x001d);
3699 if(ret_val) break;
3700 }
3182 ret_val = e1000_write_phy_reg(hw, 30, 0x00c1); 3701 ret_val = e1000_write_phy_reg(hw, 30, 0x00c1);
3183 if(ret_val) break; 3702 if(ret_val) break;
3184 ret_val = e1000_write_phy_reg(hw, 30, 0x0000); 3703 ret_val = e1000_write_phy_reg(hw, 30, 0x0000);
@@ -3310,8 +3829,17 @@ e1000_phy_m88_get_info(struct e1000_hw *hw,
3310 /* Cable Length Estimation and Local/Remote Receiver Information 3829 /* Cable Length Estimation and Local/Remote Receiver Information
3311 * are only valid at 1000 Mbps. 3830 * are only valid at 1000 Mbps.
3312 */ 3831 */
3313 phy_info->cable_length = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >> 3832 if (hw->phy_type != e1000_phy_gg82563) {
3314 M88E1000_PSSR_CABLE_LENGTH_SHIFT); 3833 phy_info->cable_length = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
3834 M88E1000_PSSR_CABLE_LENGTH_SHIFT);
3835 } else {
3836 ret_val = e1000_read_phy_reg(hw, GG82563_PHY_DSP_DISTANCE,
3837 &phy_data);
3838 if (ret_val)
3839 return ret_val;
3840
3841 phy_info->cable_length = phy_data & GG82563_DSPD_CABLE_LENGTH;
3842 }
3315 3843
3316 ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); 3844 ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
3317 if(ret_val) 3845 if(ret_val)
@@ -3392,7 +3920,8 @@ e1000_validate_mdi_setting(struct e1000_hw *hw)
3392 3920
3393/****************************************************************************** 3921/******************************************************************************
3394 * Sets up eeprom variables in the hw struct. Must be called after mac_type 3922 * Sets up eeprom variables in the hw struct. Must be called after mac_type
3395 * is configured. 3923 * is configured. Additionally, if this is ICH8, the flash controller GbE
3924 * registers must be mapped, or this will crash.
3396 * 3925 *
3397 * hw - Struct containing variables accessed by shared code 3926 * hw - Struct containing variables accessed by shared code
3398 *****************************************************************************/ 3927 *****************************************************************************/
@@ -3505,6 +4034,20 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
3505 E1000_WRITE_REG(hw, EECD, eecd); 4034 E1000_WRITE_REG(hw, EECD, eecd);
3506 } 4035 }
3507 break; 4036 break;
4037 case e1000_80003es2lan:
4038 eeprom->type = e1000_eeprom_spi;
4039 eeprom->opcode_bits = 8;
4040 eeprom->delay_usec = 1;
4041 if (eecd & E1000_EECD_ADDR_BITS) {
4042 eeprom->page_size = 32;
4043 eeprom->address_bits = 16;
4044 } else {
4045 eeprom->page_size = 8;
4046 eeprom->address_bits = 8;
4047 }
4048 eeprom->use_eerd = TRUE;
4049 eeprom->use_eewr = FALSE;
4050 break;
3508 default: 4051 default:
3509 break; 4052 break;
3510 } 4053 }
@@ -3685,9 +4228,8 @@ e1000_acquire_eeprom(struct e1000_hw *hw)
3685 4228
3686 DEBUGFUNC("e1000_acquire_eeprom"); 4229 DEBUGFUNC("e1000_acquire_eeprom");
3687 4230
3688 if(e1000_get_hw_eeprom_semaphore(hw)) 4231 if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM))
3689 return -E1000_ERR_EEPROM; 4232 return -E1000_ERR_SWFW_SYNC;
3690
3691 eecd = E1000_READ_REG(hw, EECD); 4233 eecd = E1000_READ_REG(hw, EECD);
3692 4234
3693 if (hw->mac_type != e1000_82573) { 4235 if (hw->mac_type != e1000_82573) {
@@ -3706,7 +4248,7 @@ e1000_acquire_eeprom(struct e1000_hw *hw)
3706 eecd &= ~E1000_EECD_REQ; 4248 eecd &= ~E1000_EECD_REQ;
3707 E1000_WRITE_REG(hw, EECD, eecd); 4249 E1000_WRITE_REG(hw, EECD, eecd);
3708 DEBUGOUT("Could not acquire EEPROM grant\n"); 4250 DEBUGOUT("Could not acquire EEPROM grant\n");
3709 e1000_put_hw_eeprom_semaphore(hw); 4251 e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
3710 return -E1000_ERR_EEPROM; 4252 return -E1000_ERR_EEPROM;
3711 } 4253 }
3712 } 4254 }
@@ -3829,7 +4371,7 @@ e1000_release_eeprom(struct e1000_hw *hw)
3829 E1000_WRITE_REG(hw, EECD, eecd); 4371 E1000_WRITE_REG(hw, EECD, eecd);
3830 } 4372 }
3831 4373
3832 e1000_put_hw_eeprom_semaphore(hw); 4374 e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
3833} 4375}
3834 4376
3835/****************************************************************************** 4377/******************************************************************************
@@ -3908,6 +4450,8 @@ e1000_read_eeprom(struct e1000_hw *hw,
3908 if (e1000_is_onboard_nvm_eeprom(hw) == TRUE && 4450 if (e1000_is_onboard_nvm_eeprom(hw) == TRUE &&
3909 hw->eeprom.use_eerd == FALSE) { 4451 hw->eeprom.use_eerd == FALSE) {
3910 switch (hw->mac_type) { 4452 switch (hw->mac_type) {
4453 case e1000_80003es2lan:
4454 break;
3911 default: 4455 default:
3912 /* Prepare the EEPROM for reading */ 4456 /* Prepare the EEPROM for reading */
3913 if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) 4457 if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
@@ -4025,6 +4569,9 @@ e1000_write_eeprom_eewr(struct e1000_hw *hw,
4025 uint32_t i = 0; 4569 uint32_t i = 0;
4026 int32_t error = 0; 4570 int32_t error = 0;
4027 4571
4572 if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM))
4573 return -E1000_ERR_SWFW_SYNC;
4574
4028 for (i = 0; i < words; i++) { 4575 for (i = 0; i < words; i++) {
4029 register_value = (data[i] << E1000_EEPROM_RW_REG_DATA) | 4576 register_value = (data[i] << E1000_EEPROM_RW_REG_DATA) |
4030 ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) | 4577 ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) |
@@ -4044,6 +4591,7 @@ e1000_write_eeprom_eewr(struct e1000_hw *hw,
4044 } 4591 }
4045 } 4592 }
4046 4593
4594 e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
4047 return error; 4595 return error;
4048} 4596}
4049 4597
@@ -4085,6 +4633,8 @@ e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw)
4085{ 4633{
4086 uint32_t eecd = 0; 4634 uint32_t eecd = 0;
4087 4635
4636 DEBUGFUNC("e1000_is_onboard_nvm_eeprom");
4637
4088 if(hw->mac_type == e1000_82573) { 4638 if(hw->mac_type == e1000_82573) {
4089 eecd = E1000_READ_REG(hw, EECD); 4639 eecd = E1000_READ_REG(hw, EECD);
4090 4640
@@ -4511,6 +5061,7 @@ e1000_read_mac_addr(struct e1000_hw * hw)
4511 case e1000_82546: 5061 case e1000_82546:
4512 case e1000_82546_rev_3: 5062 case e1000_82546_rev_3:
4513 case e1000_82571: 5063 case e1000_82571:
5064 case e1000_80003es2lan:
4514 if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) 5065 if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
4515 hw->perm_mac_addr[5] ^= 0x01; 5066 hw->perm_mac_addr[5] ^= 0x01;
4516 break; 5067 break;
@@ -4749,8 +5300,37 @@ e1000_rar_set(struct e1000_hw *hw,
4749 rar_low = ((uint32_t) addr[0] | 5300 rar_low = ((uint32_t) addr[0] |
4750 ((uint32_t) addr[1] << 8) | 5301 ((uint32_t) addr[1] << 8) |
4751 ((uint32_t) addr[2] << 16) | ((uint32_t) addr[3] << 24)); 5302 ((uint32_t) addr[2] << 16) | ((uint32_t) addr[3] << 24));
5303 rar_high = ((uint32_t) addr[4] | ((uint32_t) addr[5] << 8));
4752 5304
4753 rar_high = ((uint32_t) addr[4] | ((uint32_t) addr[5] << 8) | E1000_RAH_AV); 5305 /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx
5306 * unit hang.
5307 *
5308 * Description:
5309 * If there are any Rx frames queued up or otherwise present in the HW
5310 * before RSS is enabled, and then we enable RSS, the HW Rx unit will
5311 * hang. To work around this issue, we have to disable receives and
 5312  * flush out all Rx frames before we enable RSS. To do so, we
5313 * redirect all Rx traffic to manageability and then reset the HW.
5314 * This flushes away Rx frames, and (since the redirections to
5315 * manageability persists across resets) keeps new ones from coming in
 5316  * while we work.  Then, we clear the Address Valid (AV) bit for all MAC
5317 * addresses and undo the re-direction to manageability.
5318 * Now, frames are coming in again, but the MAC won't accept them, so
5319 * far so good. We now proceed to initialize RSS (if necessary) and
5320 * configure the Rx unit. Last, we re-enable the AV bits and continue
5321 * on our merry way.
5322 */
5323 switch (hw->mac_type) {
5324 case e1000_82571:
5325 case e1000_82572:
5326 case e1000_80003es2lan:
5327 if (hw->leave_av_bit_off == TRUE)
5328 break;
5329 default:
5330 /* Indicate to hardware the Address is Valid. */
5331 rar_high |= E1000_RAH_AV;
5332 break;
5333 }
4754 5334
4755 E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low); 5335 E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
4756 E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high); 5336 E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
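The comment above describes, but does not show, the final step of re-enabling the AV bits once the Rx unit has been configured. A hypothetical sketch of that step (illustration only; the function name is made up, the RAH index math matches the writes just above, and E1000_READ_REG_ARRAY is assumed to be the driver's existing array-register accessor):

static void
example_rar_reenable_av(struct e1000_hw *hw, uint32_t index)
{
	uint32_t rar_high = E1000_READ_REG_ARRAY(hw, RA, ((index << 1) + 1));

	/* Mark the receive address valid again so it matches traffic. */
	rar_high |= E1000_RAH_AV;
	E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
}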
@@ -5330,6 +5910,7 @@ e1000_get_bus_info(struct e1000_hw *hw)
5330 hw->bus_width = e1000_bus_width_pciex_1; 5910 hw->bus_width = e1000_bus_width_pciex_1;
5331 break; 5911 break;
5332 case e1000_82571: 5912 case e1000_82571:
5913 case e1000_80003es2lan:
5333 hw->bus_type = e1000_bus_type_pci_express; 5914 hw->bus_type = e1000_bus_type_pci_express;
5334 hw->bus_speed = e1000_bus_speed_2500; 5915 hw->bus_speed = e1000_bus_speed_2500;
5335 hw->bus_width = e1000_bus_width_pciex_4; 5916 hw->bus_width = e1000_bus_width_pciex_4;
@@ -5475,6 +6056,34 @@ e1000_get_cable_length(struct e1000_hw *hw,
5475 return -E1000_ERR_PHY; 6056 return -E1000_ERR_PHY;
5476 break; 6057 break;
5477 } 6058 }
6059 } else if (hw->phy_type == e1000_phy_gg82563) {
6060 ret_val = e1000_read_phy_reg(hw, GG82563_PHY_DSP_DISTANCE,
6061 &phy_data);
6062 if (ret_val)
6063 return ret_val;
6064 cable_length = phy_data & GG82563_DSPD_CABLE_LENGTH;
6065
6066 switch (cable_length) {
6067 case e1000_gg_cable_length_60:
6068 *min_length = 0;
6069 *max_length = e1000_igp_cable_length_60;
6070 break;
6071 case e1000_gg_cable_length_60_115:
6072 *min_length = e1000_igp_cable_length_60;
6073 *max_length = e1000_igp_cable_length_115;
6074 break;
6075 case e1000_gg_cable_length_115_150:
6076 *min_length = e1000_igp_cable_length_115;
6077 *max_length = e1000_igp_cable_length_150;
6078 break;
6079 case e1000_gg_cable_length_150:
6080 *min_length = e1000_igp_cable_length_150;
6081 *max_length = e1000_igp_cable_length_180;
6082 break;
6083 default:
6084 return -E1000_ERR_PHY;
6085 break;
6086 }
5478 } else if(hw->phy_type == e1000_phy_igp) { /* For IGP PHY */ 6087 } else if(hw->phy_type == e1000_phy_igp) { /* For IGP PHY */
5479 uint16_t agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = 6088 uint16_t agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
5480 {IGP01E1000_PHY_AGC_A, 6089 {IGP01E1000_PHY_AGC_A,
@@ -5584,7 +6193,8 @@ e1000_check_polarity(struct e1000_hw *hw,
5584 6193
5585 DEBUGFUNC("e1000_check_polarity"); 6194 DEBUGFUNC("e1000_check_polarity");
5586 6195
5587 if(hw->phy_type == e1000_phy_m88) { 6196 if ((hw->phy_type == e1000_phy_m88) ||
6197 (hw->phy_type == e1000_phy_gg82563)) {
5588 /* return the Polarity bit in the Status register. */ 6198 /* return the Polarity bit in the Status register. */
5589 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, 6199 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
5590 &phy_data); 6200 &phy_data);
@@ -5653,7 +6263,8 @@ e1000_check_downshift(struct e1000_hw *hw)
5653 return ret_val; 6263 return ret_val;
5654 6264
5655 hw->speed_downgraded = (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 1 : 0; 6265 hw->speed_downgraded = (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 1 : 0;
5656 } else if(hw->phy_type == e1000_phy_m88) { 6266 } else if ((hw->phy_type == e1000_phy_m88) ||
6267 (hw->phy_type == e1000_phy_gg82563)) {
5657 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, 6268 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
5658 &phy_data); 6269 &phy_data);
5659 if(ret_val) 6270 if(ret_val)
@@ -6686,6 +7297,7 @@ e1000_get_auto_rd_done(struct e1000_hw *hw)
6686 case e1000_82571: 7297 case e1000_82571:
6687 case e1000_82572: 7298 case e1000_82572:
6688 case e1000_82573: 7299 case e1000_82573:
7300 case e1000_80003es2lan:
6689 while(timeout) { 7301 while(timeout) {
6690 if (E1000_READ_REG(hw, EECD) & E1000_EECD_AUTO_RD) break; 7302 if (E1000_READ_REG(hw, EECD) & E1000_EECD_AUTO_RD) break;
6691 else msec_delay(1); 7303 else msec_delay(1);
@@ -6729,6 +7341,11 @@ e1000_get_phy_cfg_done(struct e1000_hw *hw)
6729 default: 7341 default:
6730 msec_delay(10); 7342 msec_delay(10);
6731 break; 7343 break;
7344 case e1000_80003es2lan:
7345 /* Separate *_CFG_DONE_* bit for each port */
7346 if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
7347 cfg_mask = E1000_EEPROM_CFG_DONE_PORT_1;
7348 /* Fall Through */
6732 case e1000_82571: 7349 case e1000_82571:
6733 case e1000_82572: 7350 case e1000_82572:
6734 while (timeout) { 7351 while (timeout) {
@@ -6746,12 +7363,6 @@ e1000_get_phy_cfg_done(struct e1000_hw *hw)
6746 break; 7363 break;
6747 } 7364 }
6748 7365
6749 /* PHY configuration from NVM just starts after EECD_AUTO_RD sets to high.
6750 * Need to wait for PHY configuration completion before accessing NVM
6751 * and PHY. */
6752 if (hw->mac_type == e1000_82573)
6753 msec_delay(25);
6754
6755 return E1000_SUCCESS; 7366 return E1000_SUCCESS;
6756} 7367}
6757 7368
@@ -6777,6 +7388,11 @@ e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw)
6777 if(!hw->eeprom_semaphore_present) 7388 if(!hw->eeprom_semaphore_present)
6778 return E1000_SUCCESS; 7389 return E1000_SUCCESS;
6779 7390
7391 if (hw->mac_type == e1000_80003es2lan) {
7392 /* Get the SW semaphore. */
7393 if (e1000_get_software_semaphore(hw) != E1000_SUCCESS)
7394 return -E1000_ERR_EEPROM;
7395 }
6780 7396
6781 /* Get the FW semaphore. */ 7397 /* Get the FW semaphore. */
6782 timeout = hw->eeprom.word_size + 1; 7398 timeout = hw->eeprom.word_size + 1;
@@ -6822,10 +7438,75 @@ e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw)
6822 return; 7438 return;
6823 7439
6824 swsm = E1000_READ_REG(hw, SWSM); 7440 swsm = E1000_READ_REG(hw, SWSM);
7441 if (hw->mac_type == e1000_80003es2lan) {
7442 /* Release both semaphores. */
7443 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
7444 } else
6825 swsm &= ~(E1000_SWSM_SWESMBI); 7445 swsm &= ~(E1000_SWSM_SWESMBI);
6826 E1000_WRITE_REG(hw, SWSM, swsm); 7446 E1000_WRITE_REG(hw, SWSM, swsm);
6827} 7447}
6828 7448
7449/***************************************************************************
7450 *
 7451 * Obtains the software semaphore bit (SMBI) before resetting the PHY.
7452 *
7453 * hw: Struct containing variables accessed by shared code
7454 *
 7455 * returns: - E1000_ERR_RESET if the semaphore cannot be obtained.
 7456 *            E1000_SUCCESS in any other case.
7457 *
7458 ***************************************************************************/
7459int32_t
7460e1000_get_software_semaphore(struct e1000_hw *hw)
7461{
7462 int32_t timeout = hw->eeprom.word_size + 1;
7463 uint32_t swsm;
7464
7465 DEBUGFUNC("e1000_get_software_semaphore");
7466
7467 if (hw->mac_type != e1000_80003es2lan)
7468 return E1000_SUCCESS;
7469
7470 while(timeout) {
7471 swsm = E1000_READ_REG(hw, SWSM);
7472 /* If SMBI bit cleared, it is now set and we hold the semaphore */
7473 if(!(swsm & E1000_SWSM_SMBI))
7474 break;
7475 msec_delay_irq(1);
7476 timeout--;
7477 }
7478
7479 if(!timeout) {
7480 DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
7481 return -E1000_ERR_RESET;
7482 }
7483
7484 return E1000_SUCCESS;
7485}
7486
7487/***************************************************************************
7488 *
7489 * Release semaphore bit (SMBI).
7490 *
7491 * hw: Struct containing variables accessed by shared code
7492 *
7493 ***************************************************************************/
7494void
7495e1000_release_software_semaphore(struct e1000_hw *hw)
7496{
7497 uint32_t swsm;
7498
7499 DEBUGFUNC("e1000_release_software_semaphore");
7500
7501 if (hw->mac_type != e1000_80003es2lan)
7502 return;
7503
7504 swsm = E1000_READ_REG(hw, SWSM);
 7505     /* Release the SW semaphore (SMBI). */
7506 swsm &= ~E1000_SWSM_SMBI;
7507 E1000_WRITE_REG(hw, SWSM, swsm);
7508}
7509
6829/****************************************************************************** 7510/******************************************************************************
6830 * Checks if PHY reset is blocked due to SOL/IDER session, for example. 7511 * Checks if PHY reset is blocked due to SOL/IDER session, for example.
6831 * Returning E1000_BLK_PHY_RESET isn't necessarily an error. But it's up to 7512 * Returning E1000_BLK_PHY_RESET isn't necessarily an error. But it's up to
@@ -6862,6 +7543,7 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw)
6862 case e1000_82571: 7543 case e1000_82571:
6863 case e1000_82572: 7544 case e1000_82572:
6864 case e1000_82573: 7545 case e1000_82573:
7546 case e1000_80003es2lan:
6865 fwsm = E1000_READ_REG(hw, FWSM); 7547 fwsm = E1000_READ_REG(hw, FWSM);
6866 if((fwsm & E1000_FWSM_MODE_MASK) != 0) 7548 if((fwsm & E1000_FWSM_MODE_MASK) != 0)
6867 return TRUE; 7549 return TRUE;
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index f1219dd9dbac..150e45e30f87 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -60,6 +60,7 @@ typedef enum {
60 e1000_82571, 60 e1000_82571,
61 e1000_82572, 61 e1000_82572,
62 e1000_82573, 62 e1000_82573,
63 e1000_80003es2lan,
63 e1000_num_macs 64 e1000_num_macs
64} e1000_mac_type; 65} e1000_mac_type;
65 66
@@ -139,6 +140,13 @@ typedef enum {
139} e1000_cable_length; 140} e1000_cable_length;
140 141
141typedef enum { 142typedef enum {
143 e1000_gg_cable_length_60 = 0,
144 e1000_gg_cable_length_60_115 = 1,
145 e1000_gg_cable_length_115_150 = 2,
146 e1000_gg_cable_length_150 = 4
147} e1000_gg_cable_length;
148
149typedef enum {
142 e1000_igp_cable_length_10 = 10, 150 e1000_igp_cable_length_10 = 10,
143 e1000_igp_cable_length_20 = 20, 151 e1000_igp_cable_length_20 = 20,
144 e1000_igp_cable_length_30 = 30, 152 e1000_igp_cable_length_30 = 30,
@@ -208,6 +216,7 @@ typedef enum {
208 e1000_phy_m88 = 0, 216 e1000_phy_m88 = 0,
209 e1000_phy_igp, 217 e1000_phy_igp,
210 e1000_phy_igp_2, 218 e1000_phy_igp_2,
219 e1000_phy_gg82563,
211 e1000_phy_undefined = 0xFF 220 e1000_phy_undefined = 0xFF
212} e1000_phy_type; 221} e1000_phy_type;
213 222
@@ -281,6 +290,7 @@ typedef enum {
281#define E1000_ERR_MASTER_REQUESTS_PENDING 10 290#define E1000_ERR_MASTER_REQUESTS_PENDING 10
282#define E1000_ERR_HOST_INTERFACE_COMMAND 11 291#define E1000_ERR_HOST_INTERFACE_COMMAND 11
283#define E1000_BLK_PHY_RESET 12 292#define E1000_BLK_PHY_RESET 12
293#define E1000_ERR_SWFW_SYNC 13
284 294
285/* Function prototypes */ 295/* Function prototypes */
286/* Initialization */ 296/* Initialization */
@@ -304,6 +314,8 @@ int32_t e1000_phy_hw_reset(struct e1000_hw *hw);
304int32_t e1000_phy_reset(struct e1000_hw *hw); 314int32_t e1000_phy_reset(struct e1000_hw *hw);
305int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); 315int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
306int32_t e1000_validate_mdi_setting(struct e1000_hw *hw); 316int32_t e1000_validate_mdi_setting(struct e1000_hw *hw);
317int32_t e1000_read_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *data);
318int32_t e1000_write_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data);
307 319
308/* EEPROM Functions */ 320/* EEPROM Functions */
309int32_t e1000_init_eeprom_params(struct e1000_hw *hw); 321int32_t e1000_init_eeprom_params(struct e1000_hw *hw);
@@ -454,6 +466,8 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
454#define E1000_DEV_ID_82573E_IAMT 0x108C 466#define E1000_DEV_ID_82573E_IAMT 0x108C
455#define E1000_DEV_ID_82573L 0x109A 467#define E1000_DEV_ID_82573L 0x109A
456#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 468#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
469#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096
470#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098
457 471
458 472
459#define NODE_ADDRESS_SIZE 6 473#define NODE_ADDRESS_SIZE 6
@@ -850,6 +864,7 @@ struct e1000_ffvt_entry {
850#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ 864#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */
851#define E1000_RXCW 0x00180 /* RX Configuration Word - RO */ 865#define E1000_RXCW 0x00180 /* RX Configuration Word - RO */
852#define E1000_TCTL 0x00400 /* TX Control - RW */ 866#define E1000_TCTL 0x00400 /* TX Control - RW */
867#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */
853#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ 868#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */
854#define E1000_TBT 0x00448 /* TX Burst Timer - RW */ 869#define E1000_TBT 0x00448 /* TX Burst Timer - RW */
855#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ 870#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
@@ -996,6 +1011,11 @@ struct e1000_ffvt_entry {
996#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ 1011#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */
997#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ 1012#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */
998 1013
1014#define E1000_KUMCTRLSTA 0x00034 /* MAC-PHY interface - RW */
1015#define E1000_MDPHYA 0x0003C /* PHY address - RW */
 1016#define E1000_MANC2H     0x05860  /* Management Control To Host - RW */
1017#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */
1018
999#define E1000_GCR 0x05B00 /* PCI-Ex Control */ 1019#define E1000_GCR 0x05B00 /* PCI-Ex Control */
1000#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ 1020#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */
1001#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ 1021#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */
@@ -1065,6 +1085,7 @@ struct e1000_ffvt_entry {
1065#define E1000_82542_RXCW E1000_RXCW 1085#define E1000_82542_RXCW E1000_RXCW
1066#define E1000_82542_MTA 0x00200 1086#define E1000_82542_MTA 0x00200
1067#define E1000_82542_TCTL E1000_TCTL 1087#define E1000_82542_TCTL E1000_TCTL
1088#define E1000_82542_TCTL_EXT E1000_TCTL_EXT
1068#define E1000_82542_TIPG E1000_TIPG 1089#define E1000_82542_TIPG E1000_TIPG
1069#define E1000_82542_TDBAL 0x00420 1090#define E1000_82542_TDBAL 0x00420
1070#define E1000_82542_TDBAH 0x00424 1091#define E1000_82542_TDBAH 0x00424
@@ -1212,6 +1233,8 @@ struct e1000_ffvt_entry {
1212#define E1000_82542_RSSRK E1000_RSSRK 1233#define E1000_82542_RSSRK E1000_RSSRK
1213#define E1000_82542_RSSIM E1000_RSSIM 1234#define E1000_82542_RSSIM E1000_RSSIM
1214#define E1000_82542_RSSIR E1000_RSSIR 1235#define E1000_82542_RSSIR E1000_RSSIR
1236#define E1000_82542_KUMCTRLSTA E1000_KUMCTRLSTA
1237#define E1000_82542_SW_FW_SYNC E1000_SW_FW_SYNC
1215 1238
1216/* Statistics counters collected by the MAC */ 1239/* Statistics counters collected by the MAC */
1217struct e1000_hw_stats { 1240struct e1000_hw_stats {
@@ -1303,6 +1326,7 @@ struct e1000_hw {
1303 e1000_ffe_config ffe_config_state; 1326 e1000_ffe_config ffe_config_state;
1304 uint32_t asf_firmware_present; 1327 uint32_t asf_firmware_present;
1305 uint32_t eeprom_semaphore_present; 1328 uint32_t eeprom_semaphore_present;
1329 uint32_t swfw_sync_present;
1306 unsigned long io_base; 1330 unsigned long io_base;
1307 uint32_t phy_id; 1331 uint32_t phy_id;
1308 uint32_t phy_revision; 1332 uint32_t phy_revision;
@@ -1361,6 +1385,7 @@ struct e1000_hw {
1361 boolean_t ifs_params_forced; 1385 boolean_t ifs_params_forced;
1362 boolean_t in_ifs_mode; 1386 boolean_t in_ifs_mode;
1363 boolean_t mng_reg_access_disabled; 1387 boolean_t mng_reg_access_disabled;
1388 boolean_t leave_av_bit_off;
1364}; 1389};
1365 1390
1366 1391
@@ -1393,6 +1418,8 @@ struct e1000_hw {
1393#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ 1418#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
1394#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */ 1419#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */
1395#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */ 1420#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */
1421#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */
1422#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */
1396#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ 1423#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
1397#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ 1424#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
1398#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ 1425#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */
@@ -1429,6 +1456,16 @@ struct e1000_hw {
1429#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ 1456#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */
1430#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */ 1457#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */
1431#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */ 1458#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */
1459#define E1000_STATUS_BMC_SKU_0 0x00100000 /* BMC USB redirect disabled */
1460#define E1000_STATUS_BMC_SKU_1 0x00200000 /* BMC SRAM disabled */
1461#define E1000_STATUS_BMC_SKU_2 0x00400000 /* BMC SDRAM disabled */
1462#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */
1463#define E1000_STATUS_BMC_LITE 0x01000000 /* BMC external code execution disabled */
1464#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */
1465#define E1000_STATUS_FUSE_8 0x04000000
1466#define E1000_STATUS_FUSE_9 0x08000000
1467#define E1000_STATUS_SERDES0_DIS 0x10000000 /* SERDES disabled on port 0 */
1468#define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disabled on port 1 */
1432 1469
 1433/* Constants used to interpret the masked PCI-X bus speed. */ 1470/* Constants used to interpret the masked PCI-X bus speed. */
1434#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */ 1471#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */
@@ -1506,6 +1543,8 @@ struct e1000_hw {
1506#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 1543#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
1507#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 1544#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
1508#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000 1545#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000
1546#define E1000_CTRL_EXT_LINK_MODE_KMRN 0x00000000
1547#define E1000_CTRL_EXT_LINK_MODE_SERDES 0x00C00000
1509#define E1000_CTRL_EXT_WR_WMARK_MASK 0x03000000 1548#define E1000_CTRL_EXT_WR_WMARK_MASK 0x03000000
1510#define E1000_CTRL_EXT_WR_WMARK_256 0x00000000 1549#define E1000_CTRL_EXT_WR_WMARK_256 0x00000000
1511#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 1550#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000
@@ -1515,6 +1554,9 @@ struct e1000_hw {
1515#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ 1554#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
1516#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ 1555#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
1517#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ 1556#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */
1557#define E1000_CRTL_EXT_PB_PAREN 0x01000000 /* packet buffer parity error detection enabled */
1558#define E1000_CTRL_EXT_DF_PAREN 0x02000000 /* descriptor FIFO parity error detection enable */
1559#define E1000_CTRL_EXT_GHOST_PAREN 0x40000000
1518 1560
1519/* MDI Control */ 1561/* MDI Control */
1520#define E1000_MDIC_DATA_MASK 0x0000FFFF 1562#define E1000_MDIC_DATA_MASK 0x0000FFFF
@@ -1528,6 +1570,32 @@ struct e1000_hw {
1528#define E1000_MDIC_INT_EN 0x20000000 1570#define E1000_MDIC_INT_EN 0x20000000
1529#define E1000_MDIC_ERROR 0x40000000 1571#define E1000_MDIC_ERROR 0x40000000
1530 1572
1573#define E1000_KUMCTRLSTA_MASK 0x0000FFFF
1574#define E1000_KUMCTRLSTA_OFFSET 0x001F0000
1575#define E1000_KUMCTRLSTA_OFFSET_SHIFT 16
1576#define E1000_KUMCTRLSTA_REN 0x00200000
1577
1578#define E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL 0x00000000
1579#define E1000_KUMCTRLSTA_OFFSET_CTRL 0x00000001
1580#define E1000_KUMCTRLSTA_OFFSET_INB_CTRL 0x00000002
1581#define E1000_KUMCTRLSTA_OFFSET_DIAG 0x00000003
1582#define E1000_KUMCTRLSTA_OFFSET_TIMEOUTS 0x00000004
1583#define E1000_KUMCTRLSTA_OFFSET_INB_PARAM 0x00000009
1584#define E1000_KUMCTRLSTA_OFFSET_HD_CTRL 0x00000010
1585#define E1000_KUMCTRLSTA_OFFSET_M2P_SERDES 0x0000001E
1586#define E1000_KUMCTRLSTA_OFFSET_M2P_MODES 0x0000001F
1587
1588/* FIFO Control */
1589#define E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS 0x00000008
1590#define E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS 0x00000800
1591
1592/* In-Band Control */
1593#define E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING 0x00000010
1594
1595/* Half-Duplex Control */
1596#define E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT 0x00000004
1597#define E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT 0x00000000
1598
1531/* LED Control */ 1599/* LED Control */
1532#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F 1600#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
1533#define E1000_LEDCTL_LED0_MODE_SHIFT 0 1601#define E1000_LEDCTL_LED0_MODE_SHIFT 0
@@ -1590,6 +1658,13 @@ struct e1000_hw {
1590#define E1000_ICR_MNG 0x00040000 /* Manageability event */ 1658#define E1000_ICR_MNG 0x00040000 /* Manageability event */
1591#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */ 1659#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */
1592#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ 1660#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */
1661#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */
1662#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */
1663#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity error */
1664#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */
1665#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */
1666#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */
1667#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */
1593 1668
1594/* Interrupt Cause Set */ 1669/* Interrupt Cause Set */
1595#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ 1670#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
@@ -1610,6 +1685,12 @@ struct e1000_hw {
1610#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */ 1685#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */
1611#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */ 1686#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */
1612#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */ 1687#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */
1688#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
1689#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
1690#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */
1691#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
1692#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
1693#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
1613 1694
1614/* Interrupt Mask Set */ 1695/* Interrupt Mask Set */
1615#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ 1696#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
@@ -1630,6 +1711,12 @@ struct e1000_hw {
1630#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */ 1711#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */
1631#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */ 1712#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */
1632#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */ 1713#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */
1714#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
1715#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
1716#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */
1717#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
1718#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
1719#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
1633 1720
1634/* Interrupt Mask Clear */ 1721/* Interrupt Mask Clear */
1635#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */ 1722#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */
@@ -1650,6 +1737,12 @@ struct e1000_hw {
1650#define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */ 1737#define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */
1651#define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */ 1738#define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */
1652#define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */ 1739#define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */
1740#define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
1741#define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
1742#define E1000_IMC_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */
1743#define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
1744#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
1745#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
1653 1746
1654/* Receive Control */ 1747/* Receive Control */
1655#define E1000_RCTL_RST 0x00000001 /* Software reset */ 1748#define E1000_RCTL_RST 0x00000001 /* Software reset */
@@ -1719,6 +1812,12 @@ struct e1000_hw {
1719#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ 1812#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */
1720#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ 1813#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
1721 1814
 1815/* SW_FW_SYNC definitions */
1816#define E1000_SWFW_EEP_SM 0x0001
1817#define E1000_SWFW_PHY0_SM 0x0002
1818#define E1000_SWFW_PHY1_SM 0x0004
1819#define E1000_SWFW_MAC_CSR_SM 0x0008
1820
1722/* Receive Descriptor */ 1821/* Receive Descriptor */
1723#define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */ 1822#define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */
1724#define E1000_RDT_FPDB 0x80000000 /* Flush descriptor block */ 1823#define E1000_RDT_FPDB 0x80000000 /* Flush descriptor block */
@@ -1797,6 +1896,11 @@ struct e1000_hw {
1797#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ 1896#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
1798#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */ 1897#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */
1799#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ 1898#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */
1899/* Extended Transmit Control */
1900#define E1000_TCTL_EXT_BST_MASK 0x000003FF /* Backoff Slot Time */
1901#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */
1902
1903#define DEFAULT_80003ES2LAN_TCTL_EXT_GCEX 0x00010000
1800 1904
1801/* Receive Checksum Control */ 1905/* Receive Checksum Control */
1802#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */ 1906#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */
@@ -1874,6 +1978,7 @@ struct e1000_hw {
1874#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */ 1978#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */
1875#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ 1979#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
1876#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */ 1980#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */
1981#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */
1877#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ 1982#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
1878#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address 1983#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address
1879 * filtering */ 1984 * filtering */
@@ -1962,19 +2067,19 @@ struct e1000_host_command_info {
1962/* PCI-Ex registers */ 2067/* PCI-Ex registers */
1963 2068
1964/* PCI-Ex Control Register */ 2069/* PCI-Ex Control Register */
1965#define E1000_GCR_RXD_NO_SNOOP 0x00000001 2070#define E1000_GCR_RXD_NO_SNOOP 0x00000001
1966#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 2071#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
1967#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 2072#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004
1968#define E1000_GCR_TXD_NO_SNOOP 0x00000008 2073#define E1000_GCR_TXD_NO_SNOOP 0x00000008
1969#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 2074#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010
1970#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 2075#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020
1971 2076
1972#define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ 2077#define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \
1973 E1000_GCR_RXDSCW_NO_SNOOP | \ 2078 E1000_GCR_RXDSCW_NO_SNOOP | \
1974 E1000_GCR_RXDSCR_NO_SNOOP | \ 2079 E1000_GCR_RXDSCR_NO_SNOOP | \
1975 E1000_GCR TXD_NO_SNOOP | \ 2080 E1000_GCR_TXD_NO_SNOOP | \
1976 E1000_GCR_TXDSCW_NO_SNOOP | \ 2081 E1000_GCR_TXDSCW_NO_SNOOP | \
1977 E1000_GCR_TXDSCR_NO_SNOOP) 2082 E1000_GCR_TXDSCR_NO_SNOOP)
1978 2083
1979#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 2084#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
1980/* Function Active and Power State to MNG */ 2085/* Function Active and Power State to MNG */
@@ -2035,12 +2140,14 @@ struct e1000_host_command_info {
2035#define EEPROM_INIT_CONTROL1_REG 0x000A 2140#define EEPROM_INIT_CONTROL1_REG 0x000A
2036#define EEPROM_INIT_CONTROL2_REG 0x000F 2141#define EEPROM_INIT_CONTROL2_REG 0x000F
2037#define EEPROM_INIT_CONTROL3_PORT_B 0x0014 2142#define EEPROM_INIT_CONTROL3_PORT_B 0x0014
2143#define EEPROM_INIT_3GIO_3 0x001A
2038#define EEPROM_INIT_CONTROL3_PORT_A 0x0024 2144#define EEPROM_INIT_CONTROL3_PORT_A 0x0024
2039#define EEPROM_CFG 0x0012 2145#define EEPROM_CFG 0x0012
2040#define EEPROM_FLASH_VERSION 0x0032 2146#define EEPROM_FLASH_VERSION 0x0032
2041#define EEPROM_CHECKSUM_REG 0x003F 2147#define EEPROM_CHECKSUM_REG 0x003F
2042 2148
2043#define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */ 2149#define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */
2150#define E1000_EEPROM_CFG_DONE_PORT_1 0x00080000 /* ...for second port */
2044 2151
2045/* Word definitions for ID LED Settings */ 2152/* Word definitions for ID LED Settings */
2046#define ID_LED_RESERVED_0000 0x0000 2153#define ID_LED_RESERVED_0000 0x0000
@@ -2084,6 +2191,9 @@ struct e1000_host_command_info {
2084#define EEPROM_WORD0F_ANE 0x0800 2191#define EEPROM_WORD0F_ANE 0x0800
2085#define EEPROM_WORD0F_SWPDIO_EXT 0x00F0 2192#define EEPROM_WORD0F_SWPDIO_EXT 0x00F0
2086 2193
2194/* Mask bits for fields in Word 0x1a of the EEPROM */
2195#define EEPROM_WORD1A_ASPM_MASK 0x000C
2196
2087/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */ 2197/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */
2088#define EEPROM_SUM 0xBABA 2198#define EEPROM_SUM 0xBABA
2089 2199
@@ -2126,8 +2236,11 @@ struct e1000_host_command_info {
2126 2236
2127#define DEFAULT_82542_TIPG_IPGR2 10 2237#define DEFAULT_82542_TIPG_IPGR2 10
2128#define DEFAULT_82543_TIPG_IPGR2 6 2238#define DEFAULT_82543_TIPG_IPGR2 6
2239#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7
2129#define E1000_TIPG_IPGR2_SHIFT 20 2240#define E1000_TIPG_IPGR2_SHIFT 20
2130 2241
2242#define DEFAULT_80003ES2LAN_TIPG_IPGT_10_100 0x00000009
2243#define DEFAULT_80003ES2LAN_TIPG_IPGT_1000 0x00000008
2131#define E1000_TXDMAC_DPP 0x00000001 2244#define E1000_TXDMAC_DPP 0x00000001
2132 2245
2133/* Adaptive IFS defines */ 2246/* Adaptive IFS defines */
@@ -2368,6 +2481,78 @@ struct e1000_host_command_info {
2368 2481
2369#define IGP01E1000_ANALOG_REGS_PAGE 0x20C0 2482#define IGP01E1000_ANALOG_REGS_PAGE 0x20C0
2370 2483
2484/* Bits...
2485 * 15-5: page
2486 * 4-0: register offset
2487 */
2488#define GG82563_PAGE_SHIFT 5
2489#define GG82563_REG(page, reg) \
2490 (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
2491#define GG82563_MIN_ALT_REG 30
2492
2493/* GG82563 Specific Registers */
2494#define GG82563_PHY_SPEC_CTRL \
2495 GG82563_REG(0, 16) /* PHY Specific Control */
2496#define GG82563_PHY_SPEC_STATUS \
2497 GG82563_REG(0, 17) /* PHY Specific Status */
2498#define GG82563_PHY_INT_ENABLE \
2499 GG82563_REG(0, 18) /* Interrupt Enable */
2500#define GG82563_PHY_SPEC_STATUS_2 \
2501 GG82563_REG(0, 19) /* PHY Specific Status 2 */
2502#define GG82563_PHY_RX_ERR_CNTR \
2503 GG82563_REG(0, 21) /* Receive Error Counter */
2504#define GG82563_PHY_PAGE_SELECT \
2505 GG82563_REG(0, 22) /* Page Select */
2506#define GG82563_PHY_SPEC_CTRL_2 \
2507 GG82563_REG(0, 26) /* PHY Specific Control 2 */
2508#define GG82563_PHY_PAGE_SELECT_ALT \
2509 GG82563_REG(0, 29) /* Alternate Page Select */
2510#define GG82563_PHY_TEST_CLK_CTRL \
2511 GG82563_REG(0, 30) /* Test Clock Control (use reg. 29 to select) */
2512
2513#define GG82563_PHY_MAC_SPEC_CTRL \
2514 GG82563_REG(2, 21) /* MAC Specific Control Register */
2515#define GG82563_PHY_MAC_SPEC_CTRL_2 \
2516 GG82563_REG(2, 26) /* MAC Specific Control 2 */
2517
2518#define GG82563_PHY_DSP_DISTANCE \
2519 GG82563_REG(5, 26) /* DSP Distance */
2520
2521/* Page 193 - Port Control Registers */
2522#define GG82563_PHY_KMRN_MODE_CTRL \
2523 GG82563_REG(193, 16) /* Kumeran Mode Control */
2524#define GG82563_PHY_PORT_RESET \
2525 GG82563_REG(193, 17) /* Port Reset */
2526#define GG82563_PHY_REVISION_ID \
2527 GG82563_REG(193, 18) /* Revision ID */
2528#define GG82563_PHY_DEVICE_ID \
2529 GG82563_REG(193, 19) /* Device ID */
2530#define GG82563_PHY_PWR_MGMT_CTRL \
2531 GG82563_REG(193, 20) /* Power Management Control */
2532#define GG82563_PHY_RATE_ADAPT_CTRL \
2533 GG82563_REG(193, 25) /* Rate Adaptation Control */
2534
2535/* Page 194 - KMRN Registers */
2536#define GG82563_PHY_KMRN_FIFO_CTRL_STAT \
2537 GG82563_REG(194, 16) /* FIFO's Control/Status */
2538#define GG82563_PHY_KMRN_CTRL \
2539 GG82563_REG(194, 17) /* Control */
2540#define GG82563_PHY_INBAND_CTRL \
2541 GG82563_REG(194, 18) /* Inband Control */
2542#define GG82563_PHY_KMRN_DIAGNOSTIC \
2543 GG82563_REG(194, 19) /* Diagnostic */
2544#define GG82563_PHY_ACK_TIMEOUTS \
2545 GG82563_REG(194, 20) /* Acknowledge Timeouts */
2546#define GG82563_PHY_ADV_ABILITY \
2547 GG82563_REG(194, 21) /* Advertised Ability */
2548#define GG82563_PHY_LINK_PARTNER_ADV_ABILITY \
2549 GG82563_REG(194, 23) /* Link Partner Advertised Ability */
2550#define GG82563_PHY_ADV_NEXT_PAGE \
2551 GG82563_REG(194, 24) /* Advertised Next Page */
2552#define GG82563_PHY_LINK_PARTNER_ADV_NEXT_PAGE \
2553 GG82563_REG(194, 25) /* Link Partner Advertised Next page */
2554#define GG82563_PHY_KMRN_MISC \
2555 GG82563_REG(194, 26) /* Misc. */
2371 2556
2372/* PHY Control Register */ 2557/* PHY Control Register */
2373#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ 2558#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
@@ -2681,6 +2866,113 @@ struct e1000_host_command_info {
2681#define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080 2866#define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080
2682#define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500 2867#define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500
2683 2868
2869/* GG82563 PHY Specific Control Register (Page 0, Register 16) */
2870#define GG82563_PSCR_DISABLE_JABBER 0x0001 /* 1=Disable Jabber */
2871#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Polarity Reversal Disabled */
2872#define GG82563_PSCR_POWER_DOWN 0x0004 /* 1=Power Down */
2873#define GG82563_PSCR_COPPER_TRANSMITER_DISABLE 0x0008 /* 1=Transmitter Disabled */
2874#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060
2875#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI configuration */
2876#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX configuration */
2877#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Automatic crossover */
2878#define GG82563_PSCR_ENALBE_EXTENDED_DISTANCE 0x0080 /* 1=Enable Extended Distance */
2879#define GG82563_PSCR_ENERGY_DETECT_MASK 0x0300
2880#define GG82563_PSCR_ENERGY_DETECT_OFF 0x0000 /* 00,01=Off */
2881#define GG82563_PSCR_ENERGY_DETECT_RX 0x0200 /* 10=Sense on Rx only (Energy Detect) */
2882#define GG82563_PSCR_ENERGY_DETECT_RX_TM 0x0300 /* 11=Sense and Tx NLP */
2883#define GG82563_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force Link Good */
2884#define GG82563_PSCR_DOWNSHIFT_ENABLE 0x0800 /* 1=Enable Downshift */
2885#define GG82563_PSCR_DOWNSHIFT_COUNTER_MASK 0x7000
2886#define GG82563_PSCR_DOWNSHIFT_COUNTER_SHIFT 12
2887
2888/* PHY Specific Status Register (Page 0, Register 17) */
2889#define GG82563_PSSR_JABBER 0x0001 /* 1=Jabber */
2890#define GG82563_PSSR_POLARITY 0x0002 /* 1=Polarity Reversed */
2891#define GG82563_PSSR_LINK 0x0008 /* 1=Link is Up */
2892#define GG82563_PSSR_ENERGY_DETECT 0x0010 /* 1=Sleep, 0=Active */
2893#define GG82563_PSSR_DOWNSHIFT 0x0020 /* 1=Downshift */
2894#define GG82563_PSSR_CROSSOVER_STATUS 0x0040 /* 1=MDIX, 0=MDI */
2895#define GG82563_PSSR_RX_PAUSE_ENABLED 0x0100 /* 1=Receive Pause Enabled */
2896#define GG82563_PSSR_TX_PAUSE_ENABLED 0x0200 /* 1=Transmit Pause Enabled */
2897#define GG82563_PSSR_LINK_UP 0x0400 /* 1=Link Up */
2898#define GG82563_PSSR_SPEED_DUPLEX_RESOLVED 0x0800 /* 1=Resolved */
2899#define GG82563_PSSR_PAGE_RECEIVED 0x1000 /* 1=Page Received */
2900#define GG82563_PSSR_DUPLEX 0x2000 /* 1=Full-Duplex */
2901#define GG82563_PSSR_SPEED_MASK 0xC000
2902#define GG82563_PSSR_SPEED_10MBPS 0x0000 /* 00=10Mbps */
2903#define GG82563_PSSR_SPEED_100MBPS 0x4000 /* 01=100Mbps */
2904#define GG82563_PSSR_SPEED_1000MBPS 0x8000 /* 10=1000Mbps */
2905
2906/* PHY Specific Status Register 2 (Page 0, Register 19) */
2907#define GG82563_PSSR2_JABBER 0x0001 /* 1=Jabber */
2908#define GG82563_PSSR2_POLARITY_CHANGED 0x0002 /* 1=Polarity Changed */
2909#define GG82563_PSSR2_ENERGY_DETECT_CHANGED 0x0010 /* 1=Energy Detect Changed */
2910#define GG82563_PSSR2_DOWNSHIFT_INTERRUPT 0x0020 /* 1=Downshift Detected */
2911#define GG82563_PSSR2_MDI_CROSSOVER_CHANGE 0x0040 /* 1=Crossover Changed */
2912#define GG82563_PSSR2_FALSE_CARRIER 0x0100 /* 1=False Carrier */
2913#define GG82563_PSSR2_SYMBOL_ERROR 0x0200 /* 1=Symbol Error */
2914#define GG82563_PSSR2_LINK_STATUS_CHANGED 0x0400 /* 1=Link Status Changed */
2915#define GG82563_PSSR2_AUTO_NEG_COMPLETED 0x0800 /* 1=Auto-Neg Completed */
2916#define GG82563_PSSR2_PAGE_RECEIVED 0x1000 /* 1=Page Received */
2917#define GG82563_PSSR2_DUPLEX_CHANGED 0x2000 /* 1=Duplex Changed */
2918#define GG82563_PSSR2_SPEED_CHANGED 0x4000 /* 1=Speed Changed */
2919#define GG82563_PSSR2_AUTO_NEG_ERROR 0x8000 /* 1=Auto-Neg Error */
2920
2921/* PHY Specific Control Register 2 (Page 0, Register 26) */
2922#define GG82563_PSCR2_10BT_POLARITY_FORCE 0x0002 /* 1=Force Negative Polarity */
2923#define GG82563_PSCR2_1000MB_TEST_SELECT_MASK 0x000C
2924#define GG82563_PSCR2_1000MB_TEST_SELECT_NORMAL 0x0000 /* 00,01=Normal Operation */
2925#define GG82563_PSCR2_1000MB_TEST_SELECT_112NS 0x0008 /* 10=Select 112ns Sequence */
2926#define GG82563_PSCR2_1000MB_TEST_SELECT_16NS 0x000C /* 11=Select 16ns Sequence */
2927#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000 /* 1=Reverse Auto-Negotiation */
2928#define GG82563_PSCR2_1000BT_DISABLE 0x4000 /* 1=Disable 1000BASE-T */
2929#define GG82563_PSCR2_TRANSMITER_TYPE_MASK 0x8000
2930#define GG82563_PSCR2_TRANSMITTER_TYPE_CLASS_B 0x0000 /* 0=Class B */
2931#define GG82563_PSCR2_TRANSMITTER_TYPE_CLASS_A 0x8000 /* 1=Class A */
2932
2933/* MAC Specific Control Register (Page 2, Register 21) */
2934/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
2935#define GG82563_MSCR_TX_CLK_MASK 0x0007
2936#define GG82563_MSCR_TX_CLK_10MBPS_2_5MHZ 0x0004
2937#define GG82563_MSCR_TX_CLK_100MBPS_25MHZ 0x0005
2938#define GG82563_MSCR_TX_CLK_1000MBPS_2_5MHZ 0x0006
2939#define GG82563_MSCR_TX_CLK_1000MBPS_25MHZ 0x0007
2940
2941#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */
2942
2943/* DSP Distance Register (Page 5, Register 26) */
2944#define GG82563_DSPD_CABLE_LENGTH 0x0007 /* 0 = <50M;
2945 1 = 50-80M;
2946 2 = 80-110M;
2947 3 = 110-140M;
2948 4 = >140M */
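Masking the DSP Distance register with GG82563_DSPD_CABLE_LENGTH yields the 3-bit range code documented in the comment above. A minimal, hypothetical decode helper (the function name and string table are illustrative, not driver code) might look like:

	/* hypothetical helper: map the cable-length code to a readable range */
	static const char *gg82563_cable_range(uint16_t dspd_reg)
	{
		static const char * const ranges[] = {
			"<50m", "50-80m", "80-110m", "110-140m", ">140m"
		};
		uint16_t code = dspd_reg & GG82563_DSPD_CABLE_LENGTH;

		return (code < 5) ? ranges[code] : "unknown";
	}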
2949
2950/* Kumeran Mode Control Register (Page 193, Register 16) */
2951#define GG82563_KMCR_PHY_LEDS_EN 0x0020 /* 1=PHY LEDs, 0=Kumeran Inband LEDs */
2952#define GG82563_KMCR_FORCE_LINK_UP 0x0040 /* 1=Force Link Up */
2953#define GG82563_KMCR_SUPPRESS_SGMII_EPD_EXT 0x0080
2954#define GG82563_KMCR_MDIO_BUS_SPEED_SELECT_MASK 0x0400
2955#define GG82563_KMCR_MDIO_BUS_SPEED_SELECT 0x0400 /* 1=6.25MHz, 0=0.8MHz */
2956#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800
2957
2958/* Power Management Control Register (Page 193, Register 20) */
2959#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001 /* 1=Enable SERDES Electrical Idle */
2960#define GG82563_PMCR_DISABLE_PORT 0x0002 /* 1=Disable Port */
2961#define GG82563_PMCR_DISABLE_SERDES 0x0004 /* 1=Disable SERDES */
2962#define GG82563_PMCR_REVERSE_AUTO_NEG 0x0008 /* 1=Enable Reverse Auto-Negotiation */
2963#define GG82563_PMCR_DISABLE_1000_NON_D0 0x0010 /* 1=Disable 1000Mbps Auto-Neg in non D0 */
2964#define GG82563_PMCR_DISABLE_1000 0x0020 /* 1=Disable 1000Mbps Auto-Neg Always */
2965#define GG82563_PMCR_REVERSE_AUTO_NEG_D0A 0x0040 /* 1=Enable D0a Reverse Auto-Negotiation */
2966#define GG82563_PMCR_FORCE_POWER_STATE 0x0080 /* 1=Force Power State */
2967#define GG82563_PMCR_PROGRAMMED_POWER_STATE_MASK 0x0300
2968#define GG82563_PMCR_PROGRAMMED_POWER_STATE_DR 0x0000 /* 00=Dr */
2969#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D0U 0x0100 /* 01=D0u */
2970#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D0A 0x0200 /* 10=D0a */
2971#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D3 0x0300 /* 11=D3 */
2972
2973/* In-Band Control Register (Page 194, Register 18) */
2974#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding Use */
2975
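These GG82563 fields are normally touched through a read-modify-write of the corresponding PHY register. A hedged sketch of setting the Kumeran pass-false-carrier bit follows; the helper name is hypothetical, and it assumes the driver's generic e1000_read_phy_reg()/e1000_write_phy_reg() accessors handle the GG82563 page selection internally:

	/* sketch only: set the pass-false-carrier bit in Kumeran Mode Control */
	static int32_t gg82563_set_pass_false_carrier(struct e1000_hw *hw)
	{
		uint16_t reg;
		int32_t ret;

		ret = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg);
		if (ret)
			return ret;
		reg |= GG82563_KMCR_PASS_FALSE_CARRIER;
		return e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg);
	}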
2684 2976
2685/* Bit definitions for valid PHY IDs. */ 2977/* Bit definitions for valid PHY IDs. */
2686/* I = Integrated 2978/* I = Integrated
@@ -2695,6 +2987,7 @@ struct e1000_host_command_info {
2695#define M88E1011_I_REV_4 0x04 2987#define M88E1011_I_REV_4 0x04
2696#define M88E1111_I_PHY_ID 0x01410CC0 2988#define M88E1111_I_PHY_ID 0x01410CC0
2697#define L1LXT971A_PHY_ID 0x001378E0 2989#define L1LXT971A_PHY_ID 0x001378E0
2990#define GG82563_E_PHY_ID 0x01410CA0
2698 2991
2699/* Miscellaneous PHY bit definitions. */ 2992/* Miscellaneous PHY bit definitions. */
2700#define PHY_PREAMBLE 0xFFFFFFFF 2993#define PHY_PREAMBLE 0xFFFFFFFF
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 84dcca3776ee..f39de16e6b97 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -29,6 +29,23 @@
29#include "e1000.h" 29#include "e1000.h"
30 30
31/* Change Log 31/* Change Log
32 * 7.0.33 3-Feb-2006
33 * o Added another fix for the pass false carrier bit
34 * 7.0.32 24-Jan-2006
35 * o Need to rebuild with new version number for the pass false carrier
36 * fix in e1000_hw.c
37 * 7.0.30 18-Jan-2006
38 * o fixup for tso workaround to disable it for pci-x
39 * o fix mem leak on 82542
40 * o fixes for 10 Mb/s connections and incorrect stats
41 * 7.0.28 01/06/2006
42 * o hardware workaround to only set "speed mode" bit for 1G link.
43 * 7.0.26 12/23/2005
44 * o wake on lan support modified for device ID 10B5
45 * o fix dhcp + vlan issue not making it to the iAMT firmware
46 * 7.0.24 12/9/2005
47 * o New hardware support for the Gigabit NIC embedded in the south bridge
48 * o Fixes to the recycling logic (skb->tail) from IBM LTC
32 * 6.3.9 12/16/2005 49 * 6.3.9 12/16/2005
33 * o incorporate fix for recycled skbs from IBM LTC 50 * o incorporate fix for recycled skbs from IBM LTC
34 * 6.3.7 11/18/2005 51 * 6.3.7 11/18/2005
@@ -46,54 +63,8 @@
46 * rx_buffer_len 63 * rx_buffer_len
47 * 6.3.1 9/19/05 64 * 6.3.1 9/19/05
48 * o Use adapter->tx_timeout_factor in Tx Hung Detect logic 65 * o Use adapter->tx_timeout_factor in Tx Hung Detect logic
49 (e1000_clean_tx_irq) 66 * (e1000_clean_tx_irq)
50 * o Support for 8086:10B5 device (Quad Port) 67 * o Support for 8086:10B5 device (Quad Port)
51 * 6.2.14 9/15/05
52 * o In AMT enabled configurations, set/reset DRV_LOAD bit on interface
53 * open/close
54 * 6.2.13 9/14/05
55 * o Invoke e1000_check_mng_mode only for 8257x controllers since it
56 * accesses the FWSM that is not supported in other controllers
57 * 6.2.12 9/9/05
58 * o Add support for device id E1000_DEV_ID_82546GB_QUAD_COPPER
59 * o set RCTL:SECRC only for controllers newer than 82543.
60 * o When the n/w interface comes down reset DRV_LOAD bit to notify f/w.
61 * This code was moved from e1000_remove to e1000_close
62 * 6.2.10 9/6/05
63 * o Fix error in updating RDT in el1000_alloc_rx_buffers[_ps] -- one off.
64 * o Enable fc by default on 82573 controllers (do not read eeprom)
65 * o Fix rx_errors statistic not to include missed_packet_count
66 * o Fix rx_dropped statistic not to include missed_packet_count
67 (Padraig Brady)
68 * 6.2.9 8/30/05
69 * o Remove call to update statistics from the controller ib e1000_get_stats
70 * 6.2.8 8/30/05
71 * o Improved algorithm for rx buffer allocation/rdt update
72 * o Flow control watermarks relative to rx PBA size
73 * o Simplified 'Tx Hung' detect logic
74 * 6.2.7 8/17/05
75 * o Report rx buffer allocation failures and tx timeout counts in stats
76 * 6.2.6 8/16/05
77 * o Implement workaround for controller erratum -- linear non-tso packet
78 * following a TSO gets written back prematurely
79 * 6.2.5 8/15/05
80 * o Set netdev->tx_queue_len based on link speed/duplex settings.
81 * o Fix net_stats.rx_fifo_errors <p@draigBrady.com>
82 * o Do not power off PHY if SoL/IDER session is active
83 * 6.2.4 8/10/05
84 * o Fix loopback test setup/cleanup for 82571/3 controllers
85 * o Fix parsing of outgoing packets (e1000_transfer_dhcp_info) to treat
86 * all packets as raw
87 * o Prevent operations that will cause the PHY to be reset if SoL/IDER
88 * sessions are active and log a message
89 * 6.2.2 7/21/05
90 * o used fixed size descriptors for all MTU sizes, reduces memory load
91 * 6.1.2 4/13/05
92 * o Fixed ethtool diagnostics
93 * o Enabled flow control to take default eeprom settings
94 * o Added stats_lock around e1000_read_phy_reg commands to avoid concurrent
95 * calls, one from mii_ioctl and other from within update_stats while
96 * processing MIIREG ioctl.
97 */ 68 */
98 69
99char e1000_driver_name[] = "e1000"; 70char e1000_driver_name[] = "e1000";
@@ -103,7 +74,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
103#else 74#else
104#define DRIVERNAPI "-NAPI" 75#define DRIVERNAPI "-NAPI"
105#endif 76#endif
106#define DRV_VERSION "6.3.9-k4"DRIVERNAPI 77#define DRV_VERSION "7.0.33-k2"DRIVERNAPI
107char e1000_driver_version[] = DRV_VERSION; 78char e1000_driver_version[] = DRV_VERSION;
108static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation."; 79static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
109 80
@@ -157,32 +128,26 @@ static struct pci_device_id e1000_pci_tbl[] = {
157 INTEL_E1000_ETHERNET_DEVICE(0x108A), 128 INTEL_E1000_ETHERNET_DEVICE(0x108A),
158 INTEL_E1000_ETHERNET_DEVICE(0x108B), 129 INTEL_E1000_ETHERNET_DEVICE(0x108B),
159 INTEL_E1000_ETHERNET_DEVICE(0x108C), 130 INTEL_E1000_ETHERNET_DEVICE(0x108C),
131 INTEL_E1000_ETHERNET_DEVICE(0x1096),
132 INTEL_E1000_ETHERNET_DEVICE(0x1098),
160 INTEL_E1000_ETHERNET_DEVICE(0x1099), 133 INTEL_E1000_ETHERNET_DEVICE(0x1099),
161 INTEL_E1000_ETHERNET_DEVICE(0x109A), 134 INTEL_E1000_ETHERNET_DEVICE(0x109A),
162 INTEL_E1000_ETHERNET_DEVICE(0x10B5), 135 INTEL_E1000_ETHERNET_DEVICE(0x10B5),
136 INTEL_E1000_ETHERNET_DEVICE(0x10B9),
163 /* required last entry */ 137 /* required last entry */
164 {0,} 138 {0,}
165}; 139};
166 140
167MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); 141MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
168 142
169int e1000_up(struct e1000_adapter *adapter);
170void e1000_down(struct e1000_adapter *adapter);
171void e1000_reset(struct e1000_adapter *adapter);
172int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
173int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
174int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
175void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
176void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
177static int e1000_setup_tx_resources(struct e1000_adapter *adapter, 143static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
178 struct e1000_tx_ring *txdr); 144 struct e1000_tx_ring *txdr);
179static int e1000_setup_rx_resources(struct e1000_adapter *adapter, 145static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
180 struct e1000_rx_ring *rxdr); 146 struct e1000_rx_ring *rxdr);
181static void e1000_free_tx_resources(struct e1000_adapter *adapter, 147static void e1000_free_tx_resources(struct e1000_adapter *adapter,
182 struct e1000_tx_ring *tx_ring); 148 struct e1000_tx_ring *tx_ring);
183static void e1000_free_rx_resources(struct e1000_adapter *adapter, 149static void e1000_free_rx_resources(struct e1000_adapter *adapter,
184 struct e1000_rx_ring *rx_ring); 150 struct e1000_rx_ring *rx_ring);
185void e1000_update_stats(struct e1000_adapter *adapter);
186 151
187/* Local Function Prototypes */ 152/* Local Function Prototypes */
188 153
@@ -191,9 +156,6 @@ static void e1000_exit_module(void);
191static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent); 156static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
192static void __devexit e1000_remove(struct pci_dev *pdev); 157static void __devexit e1000_remove(struct pci_dev *pdev);
193static int e1000_alloc_queues(struct e1000_adapter *adapter); 158static int e1000_alloc_queues(struct e1000_adapter *adapter);
194#ifdef CONFIG_E1000_MQ
195static void e1000_setup_queue_mapping(struct e1000_adapter *adapter);
196#endif
197static int e1000_sw_init(struct e1000_adapter *adapter); 159static int e1000_sw_init(struct e1000_adapter *adapter);
198static int e1000_open(struct net_device *netdev); 160static int e1000_open(struct net_device *netdev);
199static int e1000_close(struct net_device *netdev); 161static int e1000_close(struct net_device *netdev);
@@ -241,11 +203,10 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
241static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); 203static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
242static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, 204static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
243 int cmd); 205 int cmd);
244void e1000_set_ethtool_ops(struct net_device *netdev);
245static void e1000_enter_82542_rst(struct e1000_adapter *adapter); 206static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
246static void e1000_leave_82542_rst(struct e1000_adapter *adapter); 207static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
247static void e1000_tx_timeout(struct net_device *dev); 208static void e1000_tx_timeout(struct net_device *dev);
248static void e1000_tx_timeout_task(struct net_device *dev); 209static void e1000_reset_task(struct net_device *dev);
249static void e1000_smartspeed(struct e1000_adapter *adapter); 210static void e1000_smartspeed(struct e1000_adapter *adapter);
250static inline int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, 211static inline int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
251 struct sk_buff *skb); 212 struct sk_buff *skb);
@@ -265,14 +226,6 @@ static int e1000_resume(struct pci_dev *pdev);
265static void e1000_netpoll (struct net_device *netdev); 226static void e1000_netpoll (struct net_device *netdev);
266#endif 227#endif
267 228
268#ifdef CONFIG_E1000_MQ
269/* for multiple Rx queues */
270void e1000_rx_schedule(void *data);
271#endif
272
273/* Exported from other modules */
274
275extern void e1000_check_options(struct e1000_adapter *adapter);
276 229
277static struct pci_driver e1000_driver = { 230static struct pci_driver e1000_driver = {
278 .name = e1000_driver_name, 231 .name = e1000_driver_name,
@@ -380,7 +333,8 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
380 (vid != old_vid) && 333 (vid != old_vid) &&
381 !adapter->vlgrp->vlan_devices[old_vid]) 334 !adapter->vlgrp->vlan_devices[old_vid])
382 e1000_vlan_rx_kill_vid(netdev, old_vid); 335 e1000_vlan_rx_kill_vid(netdev, old_vid);
383 } 336 } else
337 adapter->mng_vlan_id = vid;
384 } 338 }
385} 339}
386 340
@@ -502,10 +456,6 @@ e1000_up(struct e1000_adapter *adapter)
502 return err; 456 return err;
503 } 457 }
504 458
505#ifdef CONFIG_E1000_MQ
506 e1000_setup_queue_mapping(adapter);
507#endif
508
509 adapter->tx_queue_len = netdev->tx_queue_len; 459 adapter->tx_queue_len = netdev->tx_queue_len;
510 460
511 mod_timer(&adapter->watchdog_timer, jiffies); 461 mod_timer(&adapter->watchdog_timer, jiffies);
@@ -526,9 +476,7 @@ e1000_down(struct e1000_adapter *adapter)
526 e1000_check_mng_mode(&adapter->hw); 476 e1000_check_mng_mode(&adapter->hw);
527 477
528 e1000_irq_disable(adapter); 478 e1000_irq_disable(adapter);
529#ifdef CONFIG_E1000_MQ 479
530 while (atomic_read(&adapter->rx_sched_call_data.count) != 0);
531#endif
532 free_irq(adapter->pdev->irq, netdev); 480 free_irq(adapter->pdev->irq, netdev);
533#ifdef CONFIG_PCI_MSI 481#ifdef CONFIG_PCI_MSI
534 if (adapter->hw.mac_type > e1000_82547_rev_2 && 482 if (adapter->hw.mac_type > e1000_82547_rev_2 &&
@@ -587,6 +535,7 @@ e1000_reset(struct e1000_adapter *adapter)
587 break; 535 break;
588 case e1000_82571: 536 case e1000_82571:
589 case e1000_82572: 537 case e1000_82572:
538 case e1000_80003es2lan:
590 pba = E1000_PBA_38K; 539 pba = E1000_PBA_38K;
591 break; 540 break;
592 case e1000_82573: 541 case e1000_82573:
@@ -619,7 +568,10 @@ e1000_reset(struct e1000_adapter *adapter)
619 568
620 adapter->hw.fc_high_water = fc_high_water_mark; 569 adapter->hw.fc_high_water = fc_high_water_mark;
621 adapter->hw.fc_low_water = fc_high_water_mark - 8; 570 adapter->hw.fc_low_water = fc_high_water_mark - 8;
622 adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME; 571 if (adapter->hw.mac_type == e1000_80003es2lan)
572 adapter->hw.fc_pause_time = 0xFFFF;
573 else
574 adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
623 adapter->hw.fc_send_xon = 1; 575 adapter->hw.fc_send_xon = 1;
624 adapter->hw.fc = adapter->hw.original_fc; 576 adapter->hw.fc = adapter->hw.original_fc;
625 577
@@ -663,6 +615,7 @@ e1000_probe(struct pci_dev *pdev,
663 unsigned long mmio_start, mmio_len; 615 unsigned long mmio_start, mmio_len;
664 616
665 static int cards_found = 0; 617 static int cards_found = 0;
618 static int e1000_ksp3_port_a = 0; /* global ksp3 port a indication */
666 int i, err, pci_using_dac; 619 int i, err, pci_using_dac;
667 uint16_t eeprom_data; 620 uint16_t eeprom_data;
668 uint16_t eeprom_apme_mask = E1000_EEPROM_APME; 621 uint16_t eeprom_apme_mask = E1000_EEPROM_APME;
@@ -755,6 +708,15 @@ e1000_probe(struct pci_dev *pdev,
755 if ((err = e1000_check_phy_reset_block(&adapter->hw))) 708 if ((err = e1000_check_phy_reset_block(&adapter->hw)))
756 DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n"); 709 DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
757 710
711 /* if ksp3, indicate if it's port a being setup */
712 if (pdev->device == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 &&
713 e1000_ksp3_port_a == 0)
714 adapter->ksp3_port_a = 1;
715 e1000_ksp3_port_a++;
716 /* Reset for multiple KP3 adapters */
717 if (e1000_ksp3_port_a == 4)
718 e1000_ksp3_port_a = 0;
719
758 if (adapter->hw.mac_type >= e1000_82543) { 720 if (adapter->hw.mac_type >= e1000_82543) {
759 netdev->features = NETIF_F_SG | 721 netdev->features = NETIF_F_SG |
760 NETIF_F_HW_CSUM | 722 NETIF_F_HW_CSUM |
@@ -826,8 +788,8 @@ e1000_probe(struct pci_dev *pdev,
826 adapter->phy_info_timer.function = &e1000_update_phy_info; 788 adapter->phy_info_timer.function = &e1000_update_phy_info;
827 adapter->phy_info_timer.data = (unsigned long) adapter; 789 adapter->phy_info_timer.data = (unsigned long) adapter;
828 790
829 INIT_WORK(&adapter->tx_timeout_task, 791 INIT_WORK(&adapter->reset_task,
830 (void (*)(void *))e1000_tx_timeout_task, netdev); 792 (void (*)(void *))e1000_reset_task, netdev);
831 793
832 /* we're going to reset, so assume we have no link for now */ 794 /* we're going to reset, so assume we have no link for now */
833 795
@@ -854,6 +816,7 @@ e1000_probe(struct pci_dev *pdev,
854 case e1000_82546: 816 case e1000_82546:
855 case e1000_82546_rev_3: 817 case e1000_82546_rev_3:
856 case e1000_82571: 818 case e1000_82571:
819 case e1000_80003es2lan:
857 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){ 820 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){
858 e1000_read_eeprom(&adapter->hw, 821 e1000_read_eeprom(&adapter->hw,
859 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); 822 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
@@ -972,10 +935,6 @@ e1000_remove(struct pci_dev *pdev)
972 iounmap(adapter->hw.hw_addr); 935 iounmap(adapter->hw.hw_addr);
973 pci_release_regions(pdev); 936 pci_release_regions(pdev);
974 937
975#ifdef CONFIG_E1000_MQ
976 free_percpu(adapter->cpu_netdev);
977 free_percpu(adapter->cpu_tx_ring);
978#endif
979 free_netdev(netdev); 938 free_netdev(netdev);
980 939
981 pci_disable_device(pdev); 940 pci_disable_device(pdev);
@@ -1056,40 +1015,8 @@ e1000_sw_init(struct e1000_adapter *adapter)
1056 hw->master_slave = E1000_MASTER_SLAVE; 1015 hw->master_slave = E1000_MASTER_SLAVE;
1057 } 1016 }
1058 1017
1059#ifdef CONFIG_E1000_MQ
1060 /* Number of supported queues */
1061 switch (hw->mac_type) {
1062 case e1000_82571:
1063 case e1000_82572:
1064 /* These controllers support 2 tx queues, but with a single
1065 * qdisc implementation, multiple tx queues aren't quite as
1066 * interesting. If we can find a logical way of mapping
1067 * flows to a queue, then perhaps we can up the num_tx_queue
1068 * count back to its default. Until then, we run the risk of
1069 * terrible performance due to SACK overload. */
1070 adapter->num_tx_queues = 1;
1071 adapter->num_rx_queues = 2;
1072 break;
1073 default:
1074 adapter->num_tx_queues = 1;
1075 adapter->num_rx_queues = 1;
1076 break;
1077 }
1078 adapter->num_rx_queues = min(adapter->num_rx_queues, num_online_cpus());
1079 adapter->num_tx_queues = min(adapter->num_tx_queues, num_online_cpus());
1080 DPRINTK(DRV, INFO, "Multiqueue Enabled: Rx Queue count = %u %s\n",
1081 adapter->num_rx_queues,
1082 ((adapter->num_rx_queues == 1)
1083 ? ((num_online_cpus() > 1)
1084 ? "(due to unsupported feature in current adapter)"
1085 : "(due to unsupported system configuration)")
1086 : ""));
1087 DPRINTK(DRV, INFO, "Multiqueue Enabled: Tx Queue count = %u\n",
1088 adapter->num_tx_queues);
1089#else
1090 adapter->num_tx_queues = 1; 1018 adapter->num_tx_queues = 1;
1091 adapter->num_rx_queues = 1; 1019 adapter->num_rx_queues = 1;
1092#endif
1093 1020
1094 if (e1000_alloc_queues(adapter)) { 1021 if (e1000_alloc_queues(adapter)) {
1095 DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n"); 1022 DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
@@ -1152,51 +1079,9 @@ e1000_alloc_queues(struct e1000_adapter *adapter)
1152 memset(adapter->polling_netdev, 0, size); 1079 memset(adapter->polling_netdev, 0, size);
1153#endif 1080#endif
1154 1081
1155#ifdef CONFIG_E1000_MQ
1156 adapter->rx_sched_call_data.func = e1000_rx_schedule;
1157 adapter->rx_sched_call_data.info = adapter->netdev;
1158
1159 adapter->cpu_netdev = alloc_percpu(struct net_device *);
1160 adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *);
1161#endif
1162
1163 return E1000_SUCCESS; 1082 return E1000_SUCCESS;
1164} 1083}
1165 1084
1166#ifdef CONFIG_E1000_MQ
1167static void __devinit
1168e1000_setup_queue_mapping(struct e1000_adapter *adapter)
1169{
1170 int i, cpu;
1171
1172 adapter->rx_sched_call_data.func = e1000_rx_schedule;
1173 adapter->rx_sched_call_data.info = adapter->netdev;
1174 cpus_clear(adapter->rx_sched_call_data.cpumask);
1175
1176 adapter->cpu_netdev = alloc_percpu(struct net_device *);
1177 adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *);
1178
1179 lock_cpu_hotplug();
1180 i = 0;
1181 for_each_online_cpu(cpu) {
1182 *per_cpu_ptr(adapter->cpu_tx_ring, cpu) = &adapter->tx_ring[i % adapter->num_tx_queues];
1183 /* This is incomplete because we'd like to assign separate
1184 * physical cpus to these netdev polling structures and
1185 * avoid saturating a subset of cpus.
1186 */
1187 if (i < adapter->num_rx_queues) {
1188 *per_cpu_ptr(adapter->cpu_netdev, cpu) = &adapter->polling_netdev[i];
1189 adapter->rx_ring[i].cpu = cpu;
1190 cpu_set(cpu, adapter->cpumask);
1191 } else
1192 *per_cpu_ptr(adapter->cpu_netdev, cpu) = NULL;
1193
1194 i++;
1195 }
1196 unlock_cpu_hotplug();
1197}
1198#endif
1199
1200/** 1085/**
1201 * e1000_open - Called when a network interface is made active 1086 * e1000_open - Called when a network interface is made active
1202 * @netdev: network interface device structure 1087 * @netdev: network interface device structure
@@ -1435,18 +1320,6 @@ e1000_configure_tx(struct e1000_adapter *adapter)
1435 /* Setup the HW Tx Head and Tail descriptor pointers */ 1320 /* Setup the HW Tx Head and Tail descriptor pointers */
1436 1321
1437 switch (adapter->num_tx_queues) { 1322 switch (adapter->num_tx_queues) {
1438 case 2:
1439 tdba = adapter->tx_ring[1].dma;
1440 tdlen = adapter->tx_ring[1].count *
1441 sizeof(struct e1000_tx_desc);
1442 E1000_WRITE_REG(hw, TDBAL1, (tdba & 0x00000000ffffffffULL));
1443 E1000_WRITE_REG(hw, TDBAH1, (tdba >> 32));
1444 E1000_WRITE_REG(hw, TDLEN1, tdlen);
1445 E1000_WRITE_REG(hw, TDH1, 0);
1446 E1000_WRITE_REG(hw, TDT1, 0);
1447 adapter->tx_ring[1].tdh = E1000_TDH1;
1448 adapter->tx_ring[1].tdt = E1000_TDT1;
1449 /* Fall Through */
1450 case 1: 1323 case 1:
1451 default: 1324 default:
1452 tdba = adapter->tx_ring[0].dma; 1325 tdba = adapter->tx_ring[0].dma;
@@ -1477,6 +1350,10 @@ e1000_configure_tx(struct e1000_adapter *adapter)
1477 ipgr1 = DEFAULT_82542_TIPG_IPGR1; 1350 ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1478 ipgr2 = DEFAULT_82542_TIPG_IPGR2; 1351 ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1479 break; 1352 break;
1353 case e1000_80003es2lan:
1354 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1355 ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2;
1356 break;
1480 default: 1357 default:
1481 ipgr1 = DEFAULT_82543_TIPG_IPGR1; 1358 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1482 ipgr2 = DEFAULT_82543_TIPG_IPGR2; 1359 ipgr2 = DEFAULT_82543_TIPG_IPGR2;
@@ -1497,10 +1374,13 @@ e1000_configure_tx(struct e1000_adapter *adapter)
1497 tctl = E1000_READ_REG(hw, TCTL); 1374 tctl = E1000_READ_REG(hw, TCTL);
1498 1375
1499 tctl &= ~E1000_TCTL_CT; 1376 tctl &= ~E1000_TCTL_CT;
1500 tctl |= E1000_TCTL_EN | E1000_TCTL_PSP | E1000_TCTL_RTLC | 1377 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1501 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); 1378 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1502 1379
1503 E1000_WRITE_REG(hw, TCTL, tctl); 1380#ifdef DISABLE_MULR
1381 /* disable Multiple Reads for debugging */
1382 tctl &= ~E1000_TCTL_MULR;
1383#endif
1504 1384
1505 if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) { 1385 if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
1506 tarc = E1000_READ_REG(hw, TARC0); 1386 tarc = E1000_READ_REG(hw, TARC0);
@@ -1513,6 +1393,15 @@ e1000_configure_tx(struct e1000_adapter *adapter)
1513 else 1393 else
1514 tarc |= (1 << 28); 1394 tarc |= (1 << 28);
1515 E1000_WRITE_REG(hw, TARC1, tarc); 1395 E1000_WRITE_REG(hw, TARC1, tarc);
1396 } else if (hw->mac_type == e1000_80003es2lan) {
1397 tarc = E1000_READ_REG(hw, TARC0);
1398 tarc |= 1;
1399 if (hw->media_type == e1000_media_type_internal_serdes)
1400 tarc |= (1 << 20);
1401 E1000_WRITE_REG(hw, TARC0, tarc);
1402 tarc = E1000_READ_REG(hw, TARC1);
1403 tarc |= 1;
1404 E1000_WRITE_REG(hw, TARC1, tarc);
1516 } 1405 }
1517 1406
1518 e1000_config_collision_dist(hw); 1407 e1000_config_collision_dist(hw);
@@ -1531,6 +1420,9 @@ e1000_configure_tx(struct e1000_adapter *adapter)
1531 if (hw->mac_type == e1000_82544 && 1420 if (hw->mac_type == e1000_82544 &&
1532 hw->bus_type == e1000_bus_type_pcix) 1421 hw->bus_type == e1000_bus_type_pcix)
1533 adapter->pcix_82544 = 1; 1422 adapter->pcix_82544 = 1;
1423
1424 E1000_WRITE_REG(hw, TCTL, tctl);
1425
1534} 1426}
1535 1427
1536/** 1428/**
@@ -1790,12 +1682,9 @@ e1000_configure_rx(struct e1000_adapter *adapter)
1790 uint64_t rdba; 1682 uint64_t rdba;
1791 struct e1000_hw *hw = &adapter->hw; 1683 struct e1000_hw *hw = &adapter->hw;
1792 uint32_t rdlen, rctl, rxcsum, ctrl_ext; 1684 uint32_t rdlen, rctl, rxcsum, ctrl_ext;
1793#ifdef CONFIG_E1000_MQ
1794 uint32_t reta, mrqc;
1795 int i;
1796#endif
1797 1685
1798 if (adapter->rx_ps_pages) { 1686 if (adapter->rx_ps_pages) {
1687 /* this is a 32 byte descriptor */
1799 rdlen = adapter->rx_ring[0].count * 1688 rdlen = adapter->rx_ring[0].count *
1800 sizeof(union e1000_rx_desc_packet_split); 1689 sizeof(union e1000_rx_desc_packet_split);
1801 adapter->clean_rx = e1000_clean_rx_irq_ps; 1690 adapter->clean_rx = e1000_clean_rx_irq_ps;
@@ -1837,18 +1726,6 @@ e1000_configure_rx(struct e1000_adapter *adapter)
1837 /* Setup the HW Rx Head and Tail Descriptor Pointers and 1726 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1838 * the Base and Length of the Rx Descriptor Ring */ 1727 * the Base and Length of the Rx Descriptor Ring */
1839 switch (adapter->num_rx_queues) { 1728 switch (adapter->num_rx_queues) {
1840#ifdef CONFIG_E1000_MQ
1841 case 2:
1842 rdba = adapter->rx_ring[1].dma;
1843 E1000_WRITE_REG(hw, RDBAL1, (rdba & 0x00000000ffffffffULL));
1844 E1000_WRITE_REG(hw, RDBAH1, (rdba >> 32));
1845 E1000_WRITE_REG(hw, RDLEN1, rdlen);
1846 E1000_WRITE_REG(hw, RDH1, 0);
1847 E1000_WRITE_REG(hw, RDT1, 0);
1848 adapter->rx_ring[1].rdh = E1000_RDH1;
1849 adapter->rx_ring[1].rdt = E1000_RDT1;
1850 /* Fall Through */
1851#endif
1852 case 1: 1729 case 1:
1853 default: 1730 default:
1854 rdba = adapter->rx_ring[0].dma; 1731 rdba = adapter->rx_ring[0].dma;
@@ -1862,46 +1739,6 @@ e1000_configure_rx(struct e1000_adapter *adapter)
1862 break; 1739 break;
1863 } 1740 }
1864 1741
1865#ifdef CONFIG_E1000_MQ
1866 if (adapter->num_rx_queues > 1) {
1867 uint32_t random[10];
1868
1869 get_random_bytes(&random[0], 40);
1870
1871 if (hw->mac_type <= e1000_82572) {
1872 E1000_WRITE_REG(hw, RSSIR, 0);
1873 E1000_WRITE_REG(hw, RSSIM, 0);
1874 }
1875
1876 switch (adapter->num_rx_queues) {
1877 case 2:
1878 default:
1879 reta = 0x00800080;
1880 mrqc = E1000_MRQC_ENABLE_RSS_2Q;
1881 break;
1882 }
1883
1884 /* Fill out redirection table */
1885 for (i = 0; i < 32; i++)
1886 E1000_WRITE_REG_ARRAY(hw, RETA, i, reta);
1887 /* Fill out hash function seeds */
1888 for (i = 0; i < 10; i++)
1889 E1000_WRITE_REG_ARRAY(hw, RSSRK, i, random[i]);
1890
1891 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
1892 E1000_MRQC_RSS_FIELD_IPV4_TCP);
1893 E1000_WRITE_REG(hw, MRQC, mrqc);
1894 }
1895
1896 /* Multiqueue and packet checksumming are mutually exclusive. */
1897 if (hw->mac_type >= e1000_82571) {
1898 rxcsum = E1000_READ_REG(hw, RXCSUM);
1899 rxcsum |= E1000_RXCSUM_PCSD;
1900 E1000_WRITE_REG(hw, RXCSUM, rxcsum);
1901 }
1902
1903#else
1904
1905 /* Enable 82543 Receive Checksum Offload for TCP and UDP */ 1742 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
1906 if (hw->mac_type >= e1000_82543) { 1743 if (hw->mac_type >= e1000_82543) {
1907 rxcsum = E1000_READ_REG(hw, RXCSUM); 1744 rxcsum = E1000_READ_REG(hw, RXCSUM);
@@ -1920,7 +1757,6 @@ e1000_configure_rx(struct e1000_adapter *adapter)
1920 } 1757 }
1921 E1000_WRITE_REG(hw, RXCSUM, rxcsum); 1758 E1000_WRITE_REG(hw, RXCSUM, rxcsum);
1922 } 1759 }
1923#endif /* CONFIG_E1000_MQ */
1924 1760
1925 if (hw->mac_type == e1000_82573) 1761 if (hw->mac_type == e1000_82573)
1926 E1000_WRITE_REG(hw, ERT, 0x0100); 1762 E1000_WRITE_REG(hw, ERT, 0x0100);
@@ -2392,7 +2228,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
2392{ 2228{
2393 struct net_device *netdev = adapter->netdev; 2229 struct net_device *netdev = adapter->netdev;
2394 struct e1000_tx_ring *txdr = adapter->tx_ring; 2230 struct e1000_tx_ring *txdr = adapter->tx_ring;
2395 uint32_t link; 2231 uint32_t link, tctl;
2396 2232
2397 e1000_check_for_link(&adapter->hw); 2233 e1000_check_for_link(&adapter->hw);
2398 if (adapter->hw.mac_type == e1000_82573) { 2234 if (adapter->hw.mac_type == e1000_82573) {
@@ -2418,20 +2254,61 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
2418 adapter->link_duplex == FULL_DUPLEX ? 2254 adapter->link_duplex == FULL_DUPLEX ?
2419 "Full Duplex" : "Half Duplex"); 2255 "Full Duplex" : "Half Duplex");
2420 2256
2421 /* tweak tx_queue_len according to speed/duplex */ 2257 /* tweak tx_queue_len according to speed/duplex
2258 * and adjust the timeout factor */
2422 netdev->tx_queue_len = adapter->tx_queue_len; 2259 netdev->tx_queue_len = adapter->tx_queue_len;
2423 adapter->tx_timeout_factor = 1; 2260 adapter->tx_timeout_factor = 1;
2424 if (adapter->link_duplex == HALF_DUPLEX) { 2261 adapter->txb2b = 1;
2262 switch (adapter->link_speed) {
2263 case SPEED_10:
2264 adapter->txb2b = 0;
2265 netdev->tx_queue_len = 10;
2266 adapter->tx_timeout_factor = 8;
2267 break;
2268 case SPEED_100:
2269 adapter->txb2b = 0;
2270 netdev->tx_queue_len = 100;
2271 /* maybe add some timeout factor ? */
2272 break;
2273 }
2274
2275 if ((adapter->hw.mac_type == e1000_82571 ||
2276 adapter->hw.mac_type == e1000_82572) &&
2277 adapter->txb2b == 0) {
2278#define SPEED_MODE_BIT (1 << 21)
2279 uint32_t tarc0;
2280 tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
2281 tarc0 &= ~SPEED_MODE_BIT;
2282 E1000_WRITE_REG(&adapter->hw, TARC0, tarc0);
2283 }
2284
2285#ifdef NETIF_F_TSO
2286 /* disable TSO for pcie and 10/100 speeds, to avoid
2287 * some hardware issues */
2288 if (!adapter->tso_force &&
2289 adapter->hw.bus_type == e1000_bus_type_pci_express){
2425 switch (adapter->link_speed) { 2290 switch (adapter->link_speed) {
2426 case SPEED_10: 2291 case SPEED_10:
2427 netdev->tx_queue_len = 10;
2428 adapter->tx_timeout_factor = 8;
2429 break;
2430 case SPEED_100: 2292 case SPEED_100:
2431 netdev->tx_queue_len = 100; 2293 DPRINTK(PROBE,INFO,
2294 "10/100 speed: disabling TSO\n");
2295 netdev->features &= ~NETIF_F_TSO;
2296 break;
2297 case SPEED_1000:
2298 netdev->features |= NETIF_F_TSO;
2299 break;
2300 default:
2301 /* oops */
2432 break; 2302 break;
2433 } 2303 }
2434 } 2304 }
2305#endif
2306
2307 /* enable transmits in the hardware, need to do this
2308 * after setting TARC0 */
2309 tctl = E1000_READ_REG(&adapter->hw, TCTL);
2310 tctl |= E1000_TCTL_EN;
2311 E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
2435 2312
2436 netif_carrier_on(netdev); 2313 netif_carrier_on(netdev);
2437 netif_wake_queue(netdev); 2314 netif_wake_queue(netdev);
@@ -2446,6 +2323,16 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
2446 netif_carrier_off(netdev); 2323 netif_carrier_off(netdev);
2447 netif_stop_queue(netdev); 2324 netif_stop_queue(netdev);
2448 mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ); 2325 mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
2326
2327 /* 80003ES2LAN workaround--
2328 * For packet buffer work-around on link down event;
2329 * disable receives in the ISR and
2330 * reset device here in the watchdog
2331 */
2332 if (adapter->hw.mac_type == e1000_80003es2lan) {
2333 /* reset device */
2334 schedule_work(&adapter->reset_task);
2335 }
2449 } 2336 }
2450 2337
2451 e1000_smartspeed(adapter); 2338 e1000_smartspeed(adapter);
@@ -2465,16 +2352,14 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
2465 2352
2466 e1000_update_adaptive(&adapter->hw); 2353 e1000_update_adaptive(&adapter->hw);
2467 2354
2468#ifdef CONFIG_E1000_MQ
2469 txdr = *per_cpu_ptr(adapter->cpu_tx_ring, smp_processor_id());
2470#endif
2471 if (!netif_carrier_ok(netdev)) { 2355 if (!netif_carrier_ok(netdev)) {
2472 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { 2356 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2473 /* We've lost link, so the controller stops DMA, 2357 /* We've lost link, so the controller stops DMA,
2474 * but we've got queued Tx work that's never going 2358 * but we've got queued Tx work that's never going
2475 * to get done, so reset controller to flush Tx. 2359 * to get done, so reset controller to flush Tx.
2476 * (Do the reset outside of interrupt context). */ 2360 * (Do the reset outside of interrupt context). */
2477 schedule_work(&adapter->tx_timeout_task); 2361 adapter->tx_timeout_count++;
2362 schedule_work(&adapter->reset_task);
2478 } 2363 }
2479 } 2364 }
2480 2365
@@ -2649,9 +2534,9 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2649 /* Workaround for Controller erratum -- 2534 /* Workaround for Controller erratum --
2650 * descriptor for non-tso packet in a linear SKB that follows a 2535 * descriptor for non-tso packet in a linear SKB that follows a
2651 * tso gets written back prematurely before the data is fully 2536 * tso gets written back prematurely before the data is fully
2652 * DMAd to the controller */ 2537 * DMA'd to the controller */
2653 if (!skb->data_len && tx_ring->last_tx_tso && 2538 if (!skb->data_len && tx_ring->last_tx_tso &&
2654 !skb_shinfo(skb)->tso_size) { 2539 !skb_shinfo(skb)->tso_size) {
2655 tx_ring->last_tx_tso = 0; 2540 tx_ring->last_tx_tso = 0;
2656 size -= 4; 2541 size -= 4;
2657 } 2542 }
@@ -2840,7 +2725,7 @@ e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
2840 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) ) 2725 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) )
2841 return 0; 2726 return 0;
2842 } 2727 }
2843 if ((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) { 2728 if (skb->len > MINIMUM_DHCP_PACKET_SIZE) {
2844 struct ethhdr *eth = (struct ethhdr *) skb->data; 2729 struct ethhdr *eth = (struct ethhdr *) skb->data;
2845 if ((htons(ETH_P_IP) == eth->h_proto)) { 2730 if ((htons(ETH_P_IP) == eth->h_proto)) {
2846 const struct iphdr *ip = 2731 const struct iphdr *ip =
@@ -2881,11 +2766,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2881 unsigned int f; 2766 unsigned int f;
2882 len -= skb->data_len; 2767 len -= skb->data_len;
2883 2768
2884#ifdef CONFIG_E1000_MQ
2885 tx_ring = *per_cpu_ptr(adapter->cpu_tx_ring, smp_processor_id());
2886#else
2887 tx_ring = adapter->tx_ring; 2769 tx_ring = adapter->tx_ring;
2888#endif
2889 2770
2890 if (unlikely(skb->len <= 0)) { 2771 if (unlikely(skb->len <= 0)) {
2891 dev_kfree_skb_any(skb); 2772 dev_kfree_skb_any(skb);
@@ -2905,21 +2786,29 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2905 max_per_txd = min(mss << 2, max_per_txd); 2786 max_per_txd = min(mss << 2, max_per_txd);
2906 max_txd_pwr = fls(max_per_txd) - 1; 2787 max_txd_pwr = fls(max_per_txd) - 1;
2907 2788
2908 /* TSO Workaround for 82571/2 Controllers -- if skb->data 2789 /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
2909 * points to just header, pull a few bytes of payload from 2790 * points to just header, pull a few bytes of payload from
2910 * frags into skb->data */ 2791 * frags into skb->data */
2911 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); 2792 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
2912 if (skb->data_len && (hdr_len == (skb->len - skb->data_len)) && 2793 if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) {
2913 (adapter->hw.mac_type == e1000_82571 || 2794 switch (adapter->hw.mac_type) {
2914 adapter->hw.mac_type == e1000_82572)) { 2795 unsigned int pull_size;
2915 unsigned int pull_size; 2796 case e1000_82571:
2916 pull_size = min((unsigned int)4, skb->data_len); 2797 case e1000_82572:
2917 if (!__pskb_pull_tail(skb, pull_size)) { 2798 case e1000_82573:
2918 printk(KERN_ERR "__pskb_pull_tail failed.\n"); 2799 pull_size = min((unsigned int)4, skb->data_len);
2919 dev_kfree_skb_any(skb); 2800 if (!__pskb_pull_tail(skb, pull_size)) {
2920 return NETDEV_TX_OK; 2801 printk(KERN_ERR
2802 "__pskb_pull_tail failed.\n");
2803 dev_kfree_skb_any(skb);
2804 return NETDEV_TX_OK;
2805 }
2806 len = skb->len - skb->data_len;
2807 break;
2808 default:
2809 /* do nothing */
2810 break;
2921 } 2811 }
2922 len = skb->len - skb->data_len;
2923 } 2812 }
2924 } 2813 }
2925 2814
@@ -2935,7 +2824,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2935#ifdef NETIF_F_TSO 2824#ifdef NETIF_F_TSO
2936 /* Controller Erratum workaround */ 2825 /* Controller Erratum workaround */
2937 if (!skb->data_len && tx_ring->last_tx_tso && 2826 if (!skb->data_len && tx_ring->last_tx_tso &&
2938 !skb_shinfo(skb)->tso_size) 2827 !skb_shinfo(skb)->tso_size)
2939 count++; 2828 count++;
2940#endif 2829#endif
2941 2830
@@ -2958,7 +2847,9 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2958 if (adapter->pcix_82544) 2847 if (adapter->pcix_82544)
2959 count += nr_frags; 2848 count += nr_frags;
2960 2849
2961 if (adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) ) 2850
2851 if (adapter->hw.tx_pkt_filtering &&
2852 (adapter->hw.mac_type == e1000_82573))
2962 e1000_transfer_dhcp_info(adapter, skb); 2853 e1000_transfer_dhcp_info(adapter, skb);
2963 2854
2964 local_irq_save(flags); 2855 local_irq_save(flags);
@@ -3036,15 +2927,15 @@ e1000_tx_timeout(struct net_device *netdev)
3036 struct e1000_adapter *adapter = netdev_priv(netdev); 2927 struct e1000_adapter *adapter = netdev_priv(netdev);
3037 2928
3038 /* Do the reset outside of interrupt context */ 2929 /* Do the reset outside of interrupt context */
3039 schedule_work(&adapter->tx_timeout_task); 2930 adapter->tx_timeout_count++;
2931 schedule_work(&adapter->reset_task);
3040} 2932}
3041 2933
3042static void 2934static void
3043e1000_tx_timeout_task(struct net_device *netdev) 2935e1000_reset_task(struct net_device *netdev)
3044{ 2936{
3045 struct e1000_adapter *adapter = netdev_priv(netdev); 2937 struct e1000_adapter *adapter = netdev_priv(netdev);
3046 2938
3047 adapter->tx_timeout_count++;
3048 e1000_down(adapter); 2939 e1000_down(adapter);
3049 e1000_up(adapter); 2940 e1000_up(adapter);
3050} 2941}
@@ -3079,6 +2970,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
3079{ 2970{
3080 struct e1000_adapter *adapter = netdev_priv(netdev); 2971 struct e1000_adapter *adapter = netdev_priv(netdev);
3081 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 2972 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
2973 uint16_t eeprom_data = 0;
3082 2974
3083 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || 2975 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3084 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 2976 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
@@ -3090,14 +2982,28 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
3090 switch (adapter->hw.mac_type) { 2982 switch (adapter->hw.mac_type) {
3091 case e1000_82542_rev2_0: 2983 case e1000_82542_rev2_0:
3092 case e1000_82542_rev2_1: 2984 case e1000_82542_rev2_1:
3093 case e1000_82573:
3094 if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { 2985 if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
3095 DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n"); 2986 DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
3096 return -EINVAL; 2987 return -EINVAL;
3097 } 2988 }
3098 break; 2989 break;
2990 case e1000_82573:
2991 /* only enable jumbo frames if ASPM is disabled completely
2992 * this means both bits must be zero in 0x1A bits 3:2 */
2993 e1000_read_eeprom(&adapter->hw, EEPROM_INIT_3GIO_3, 1,
2994 &eeprom_data);
2995 if (eeprom_data & EEPROM_WORD1A_ASPM_MASK) {
2996 if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
2997 DPRINTK(PROBE, ERR,
2998 "Jumbo Frames not supported.\n");
2999 return -EINVAL;
3000 }
3001 break;
3002 }
3003 /* fall through to get support */
3099 case e1000_82571: 3004 case e1000_82571:
3100 case e1000_82572: 3005 case e1000_82572:
3006 case e1000_80003es2lan:
3101#define MAX_STD_JUMBO_FRAME_SIZE 9234 3007#define MAX_STD_JUMBO_FRAME_SIZE 9234
3102 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { 3008 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3103 DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n"); 3009 DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
@@ -3251,11 +3157,15 @@ e1000_update_stats(struct e1000_adapter *adapter)
3251 3157
3252 /* Rx Errors */ 3158 /* Rx Errors */
3253 3159
3160 /* RLEC on some newer hardware can be incorrect so build
3161 * our own version based on RUC and ROC */
3254 adapter->net_stats.rx_errors = adapter->stats.rxerrc + 3162 adapter->net_stats.rx_errors = adapter->stats.rxerrc +
3255 adapter->stats.crcerrs + adapter->stats.algnerrc + 3163 adapter->stats.crcerrs + adapter->stats.algnerrc +
3256 adapter->stats.rlec + adapter->stats.cexterr; 3164 adapter->stats.ruc + adapter->stats.roc +
3165 adapter->stats.cexterr;
3257 adapter->net_stats.rx_dropped = 0; 3166 adapter->net_stats.rx_dropped = 0;
3258 adapter->net_stats.rx_length_errors = adapter->stats.rlec; 3167 adapter->net_stats.rx_length_errors = adapter->stats.ruc +
3168 adapter->stats.roc;
3259 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; 3169 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
3260 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc; 3170 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
3261 adapter->net_stats.rx_missed_errors = adapter->stats.mpc; 3171 adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
@@ -3288,29 +3198,6 @@ e1000_update_stats(struct e1000_adapter *adapter)
3288 spin_unlock_irqrestore(&adapter->stats_lock, flags); 3198 spin_unlock_irqrestore(&adapter->stats_lock, flags);
3289} 3199}
3290 3200
3291#ifdef CONFIG_E1000_MQ
3292void
3293e1000_rx_schedule(void *data)
3294{
3295 struct net_device *poll_dev, *netdev = data;
3296 struct e1000_adapter *adapter = netdev->priv;
3297 int this_cpu = get_cpu();
3298
3299 poll_dev = *per_cpu_ptr(adapter->cpu_netdev, this_cpu);
3300 if (poll_dev == NULL) {
3301 put_cpu();
3302 return;
3303 }
3304
3305 if (likely(netif_rx_schedule_prep(poll_dev)))
3306 __netif_rx_schedule(poll_dev);
3307 else
3308 e1000_irq_enable(adapter);
3309
3310 put_cpu();
3311}
3312#endif
3313
3314/** 3201/**
3315 * e1000_intr - Interrupt Handler 3202 * e1000_intr - Interrupt Handler
3316 * @irq: interrupt number 3203 * @irq: interrupt number
@@ -3324,7 +3211,7 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
3324 struct net_device *netdev = data; 3211 struct net_device *netdev = data;
3325 struct e1000_adapter *adapter = netdev_priv(netdev); 3212 struct e1000_adapter *adapter = netdev_priv(netdev);
3326 struct e1000_hw *hw = &adapter->hw; 3213 struct e1000_hw *hw = &adapter->hw;
3327 uint32_t icr = E1000_READ_REG(hw, ICR); 3214 uint32_t rctl, icr = E1000_READ_REG(hw, ICR);
3328#ifndef CONFIG_E1000_NAPI 3215#ifndef CONFIG_E1000_NAPI
3329 int i; 3216 int i;
3330#else 3217#else
@@ -3346,6 +3233,17 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
3346 3233
3347 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { 3234 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3348 hw->get_link_status = 1; 3235 hw->get_link_status = 1;
3236 /* 80003ES2LAN workaround--
3237 * For packet buffer work-around on link down event;
3238 * disable receives here in the ISR and
3239 * reset adapter in watchdog
3240 */
3241 if (netif_carrier_ok(netdev) &&
3242 (adapter->hw.mac_type == e1000_80003es2lan)) {
3243 /* disable receives */
3244 rctl = E1000_READ_REG(hw, RCTL);
3245 E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
3246 }
3349 mod_timer(&adapter->watchdog_timer, jiffies); 3247 mod_timer(&adapter->watchdog_timer, jiffies);
3350 } 3248 }
3351 3249
@@ -3355,26 +3253,11 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
3355 E1000_WRITE_REG(hw, IMC, ~0); 3253 E1000_WRITE_REG(hw, IMC, ~0);
3356 E1000_WRITE_FLUSH(hw); 3254 E1000_WRITE_FLUSH(hw);
3357 } 3255 }
3358#ifdef CONFIG_E1000_MQ
3359 if (atomic_read(&adapter->rx_sched_call_data.count) == 0) {
3360 /* We must setup the cpumask once count == 0 since
3361 * each cpu bit is cleared when the work is done. */
3362 adapter->rx_sched_call_data.cpumask = adapter->cpumask;
3363 atomic_add(adapter->num_rx_queues - 1, &adapter->irq_sem);
3364 atomic_set(&adapter->rx_sched_call_data.count,
3365 adapter->num_rx_queues);
3366 smp_call_async_mask(&adapter->rx_sched_call_data);
3367 } else {
3368 printk("call_data.count == %u\n", atomic_read(&adapter->rx_sched_call_data.count));
3369 }
3370#else /* if !CONFIG_E1000_MQ */
3371 if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0]))) 3256 if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0])))
3372 __netif_rx_schedule(&adapter->polling_netdev[0]); 3257 __netif_rx_schedule(&adapter->polling_netdev[0]);
3373 else 3258 else
3374 e1000_irq_enable(adapter); 3259 e1000_irq_enable(adapter);
3375#endif /* CONFIG_E1000_MQ */ 3260#else
3376
3377#else /* if !CONFIG_E1000_NAPI */
3378 /* Writing IMC and IMS is needed for 82547. 3261 /* Writing IMC and IMS is needed for 82547.
3379 * Due to Hub Link bus being occupied, an interrupt 3262 * Due to Hub Link bus being occupied, an interrupt
3380 * de-assertion message is not able to be sent. 3263 * de-assertion message is not able to be sent.
@@ -3398,7 +3281,7 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
3398 if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) 3281 if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
3399 e1000_irq_enable(adapter); 3282 e1000_irq_enable(adapter);
3400 3283
3401#endif /* CONFIG_E1000_NAPI */ 3284#endif
3402 3285
3403 return IRQ_HANDLED; 3286 return IRQ_HANDLED;
3404} 3287}
@@ -3474,6 +3357,9 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
3474 struct e1000_tx_desc *tx_desc, *eop_desc; 3357 struct e1000_tx_desc *tx_desc, *eop_desc;
3475 struct e1000_buffer *buffer_info; 3358 struct e1000_buffer *buffer_info;
3476 unsigned int i, eop; 3359 unsigned int i, eop;
3360#ifdef CONFIG_E1000_NAPI
3361 unsigned int count = 0;
3362#endif
3477 boolean_t cleaned = FALSE; 3363 boolean_t cleaned = FALSE;
3478 3364
3479 i = tx_ring->next_to_clean; 3365 i = tx_ring->next_to_clean;
@@ -3486,21 +3372,20 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
3486 buffer_info = &tx_ring->buffer_info[i]; 3372 buffer_info = &tx_ring->buffer_info[i];
3487 cleaned = (i == eop); 3373 cleaned = (i == eop);
3488 3374
3489#ifdef CONFIG_E1000_MQ
3490 tx_ring->tx_stats.bytes += buffer_info->length;
3491#endif
3492 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 3375 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3493 memset(tx_desc, 0, sizeof(struct e1000_tx_desc)); 3376 memset(tx_desc, 0, sizeof(struct e1000_tx_desc));
3494 3377
3495 if (unlikely(++i == tx_ring->count)) i = 0; 3378 if (unlikely(++i == tx_ring->count)) i = 0;
3496 } 3379 }
3497 3380
3498#ifdef CONFIG_E1000_MQ
3499 tx_ring->tx_stats.packets++;
3500#endif
3501 3381
3502 eop = tx_ring->buffer_info[i].next_to_watch; 3382 eop = tx_ring->buffer_info[i].next_to_watch;
3503 eop_desc = E1000_TX_DESC(*tx_ring, eop); 3383 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3384#ifdef CONFIG_E1000_NAPI
3385#define E1000_TX_WEIGHT 64
3386 /* weight of a sort for tx, to avoid endless transmit cleanup */
3387 if (count++ == E1000_TX_WEIGHT) break;
3388#endif
3504 } 3389 }
3505 3390
3506 tx_ring->next_to_clean = i; 3391 tx_ring->next_to_clean = i;
@@ -3519,7 +3404,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
3519 adapter->detect_tx_hung = FALSE; 3404 adapter->detect_tx_hung = FALSE;
3520 if (tx_ring->buffer_info[eop].dma && 3405 if (tx_ring->buffer_info[eop].dma &&
3521 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + 3406 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3522 adapter->tx_timeout_factor * HZ) 3407 (adapter->tx_timeout_factor * HZ))
3523 && !(E1000_READ_REG(&adapter->hw, STATUS) & 3408 && !(E1000_READ_REG(&adapter->hw, STATUS) &
3524 E1000_STATUS_TXOFF)) { 3409 E1000_STATUS_TXOFF)) {
3525 3410
@@ -3644,10 +3529,15 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3644 skb = buffer_info->skb; 3529 skb = buffer_info->skb;
3645 buffer_info->skb = NULL; 3530 buffer_info->skb = NULL;
3646 3531
3532 prefetch(skb->data - NET_IP_ALIGN);
3533
3647 if (++i == rx_ring->count) i = 0; 3534 if (++i == rx_ring->count) i = 0;
3648 next_rxd = E1000_RX_DESC(*rx_ring, i); 3535 next_rxd = E1000_RX_DESC(*rx_ring, i);
3536 prefetch(next_rxd);
3537
3649 next_buffer = &rx_ring->buffer_info[i]; 3538 next_buffer = &rx_ring->buffer_info[i];
3650 next_skb = next_buffer->skb; 3539 next_skb = next_buffer->skb;
3540 prefetch(next_skb->data - NET_IP_ALIGN);
3651 3541
3652 cleaned = TRUE; 3542 cleaned = TRUE;
3653 cleaned_count++; 3543 cleaned_count++;
@@ -3733,10 +3623,6 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3733 } 3623 }
3734#endif /* CONFIG_E1000_NAPI */ 3624#endif /* CONFIG_E1000_NAPI */
3735 netdev->last_rx = jiffies; 3625 netdev->last_rx = jiffies;
3736#ifdef CONFIG_E1000_MQ
3737 rx_ring->rx_stats.packets++;
3738 rx_ring->rx_stats.bytes += length;
3739#endif
3740 3626
3741next_desc: 3627next_desc:
3742 rx_desc->status = 0; 3628 rx_desc->status = 0;
@@ -3747,6 +3633,7 @@ next_desc:
3747 cleaned_count = 0; 3633 cleaned_count = 0;
3748 } 3634 }
3749 3635
3636 /* use prefetched values */
3750 rx_desc = next_rxd; 3637 rx_desc = next_rxd;
3751 buffer_info = next_buffer; 3638 buffer_info = next_buffer;
3752 } 3639 }
@@ -3789,9 +3676,9 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3789 i = rx_ring->next_to_clean; 3676 i = rx_ring->next_to_clean;
3790 rx_desc = E1000_RX_DESC_PS(*rx_ring, i); 3677 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
3791 staterr = le32_to_cpu(rx_desc->wb.middle.status_error); 3678 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
3792 buffer_info = &rx_ring->buffer_info[i];
3793 3679
3794 while (staterr & E1000_RXD_STAT_DD) { 3680 while (staterr & E1000_RXD_STAT_DD) {
3681 buffer_info = &rx_ring->buffer_info[i];
3795 ps_page = &rx_ring->ps_page[i]; 3682 ps_page = &rx_ring->ps_page[i];
3796 ps_page_dma = &rx_ring->ps_page_dma[i]; 3683 ps_page_dma = &rx_ring->ps_page_dma[i];
3797#ifdef CONFIG_E1000_NAPI 3684#ifdef CONFIG_E1000_NAPI
@@ -3801,10 +3688,16 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3801#endif 3688#endif
3802 skb = buffer_info->skb; 3689 skb = buffer_info->skb;
3803 3690
3691 /* in the packet split case this is header only */
3692 prefetch(skb->data - NET_IP_ALIGN);
3693
3804 if (++i == rx_ring->count) i = 0; 3694 if (++i == rx_ring->count) i = 0;
3805 next_rxd = E1000_RX_DESC_PS(*rx_ring, i); 3695 next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
3696 prefetch(next_rxd);
3697
3806 next_buffer = &rx_ring->buffer_info[i]; 3698 next_buffer = &rx_ring->buffer_info[i];
3807 next_skb = next_buffer->skb; 3699 next_skb = next_buffer->skb;
3700 prefetch(next_skb->data - NET_IP_ALIGN);
3808 3701
3809 cleaned = TRUE; 3702 cleaned = TRUE;
3810 cleaned_count++; 3703 cleaned_count++;
@@ -3836,23 +3729,49 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3836 /* Good Receive */ 3729 /* Good Receive */
3837 skb_put(skb, length); 3730 skb_put(skb, length);
3838 3731
3732 {
3733 /* this looks ugly, but it seems compiler issues make it
3734 more efficient than reusing j */
3735 int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
3736
3737 /* page alloc/put takes too long and effects small packet
3738 * throughput, so unsplit small packets and save the alloc/put*/
3739 if (l1 && ((length + l1) < E1000_CB_LENGTH)) {
3740 u8 *vaddr;
3741 /* there is no documentation about how to call
3742 * kmap_atomic, so we can't hold the mapping
3743 * very long */
3744 pci_dma_sync_single_for_cpu(pdev,
3745 ps_page_dma->ps_page_dma[0],
3746 PAGE_SIZE,
3747 PCI_DMA_FROMDEVICE);
3748 vaddr = kmap_atomic(ps_page->ps_page[0],
3749 KM_SKB_DATA_SOFTIRQ);
3750 memcpy(skb->tail, vaddr, l1);
3751 kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
3752 pci_dma_sync_single_for_device(pdev,
3753 ps_page_dma->ps_page_dma[0],
3754 PAGE_SIZE, PCI_DMA_FROMDEVICE);
3755 skb_put(skb, l1);
3756 length += l1;
3757 goto copydone;
3758 } /* if */
3759 }
3760
3839 for (j = 0; j < adapter->rx_ps_pages; j++) { 3761 for (j = 0; j < adapter->rx_ps_pages; j++) {
3840 if (!(length = le16_to_cpu(rx_desc->wb.upper.length[j]))) 3762 if (!(length= le16_to_cpu(rx_desc->wb.upper.length[j])))
3841 break; 3763 break;
3842
3843 pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j], 3764 pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j],
3844 PAGE_SIZE, PCI_DMA_FROMDEVICE); 3765 PAGE_SIZE, PCI_DMA_FROMDEVICE);
3845 ps_page_dma->ps_page_dma[j] = 0; 3766 ps_page_dma->ps_page_dma[j] = 0;
3846 skb_shinfo(skb)->frags[j].page = 3767 skb_fill_page_desc(skb, j, ps_page->ps_page[j], 0,
3847 ps_page->ps_page[j]; 3768 length);
3848 ps_page->ps_page[j] = NULL; 3769 ps_page->ps_page[j] = NULL;
3849 skb_shinfo(skb)->frags[j].page_offset = 0;
3850 skb_shinfo(skb)->frags[j].size = length;
3851 skb_shinfo(skb)->nr_frags++;
3852 skb->len += length; 3770 skb->len += length;
3853 skb->data_len += length; 3771 skb->data_len += length;
3854 } 3772 }
3855 3773
3774copydone:
3856 e1000_rx_checksum(adapter, staterr, 3775 e1000_rx_checksum(adapter, staterr,
3857 le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb); 3776 le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
3858 skb->protocol = eth_type_trans(skb, netdev); 3777 skb->protocol = eth_type_trans(skb, netdev);
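
The block added in the hunk above is a copybreak-style optimisation for the packet-split receive path: when the single payload fragment is small enough to fit alongside the header in the already-allocated buffer, the driver copies it out of the page and skips attaching the page as an skb fragment, saving a page alloc/put per small packet. A rough sketch of the decision only, not the driver's code; frag_len, hdr_len, copy_threshold, page and copy_payload() are hypothetical stand-ins for the kmap_atomic/pci_dma_sync sequence shown in the patch:

    /* Sketch only: unsplit small packets instead of taking a page reference. */
    if (frag_len && hdr_len + frag_len < copy_threshold) {
            copy_payload(skb, page, frag_len);      /* memcpy into the header buffer */
            skb_put(skb, frag_len);                 /* payload now lives inline */
    } else {
            /* large packet: hand the page itself to the skb as a fragment */
            skb_fill_page_desc(skb, 0, page, 0, frag_len);
    }
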
@@ -3878,10 +3797,6 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3878 } 3797 }
3879#endif /* CONFIG_E1000_NAPI */ 3798#endif /* CONFIG_E1000_NAPI */
3880 netdev->last_rx = jiffies; 3799 netdev->last_rx = jiffies;
3881#ifdef CONFIG_E1000_MQ
3882 rx_ring->rx_stats.packets++;
3883 rx_ring->rx_stats.bytes += length;
3884#endif
3885 3800
3886next_desc: 3801next_desc:
3887 rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF); 3802 rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
@@ -3893,6 +3808,7 @@ next_desc:
3893 cleaned_count = 0; 3808 cleaned_count = 0;
3894 } 3809 }
3895 3810
3811 /* use prefetched values */
3896 rx_desc = next_rxd; 3812 rx_desc = next_rxd;
3897 buffer_info = next_buffer; 3813 buffer_info = next_buffer;
3898 3814
@@ -3936,7 +3852,6 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
3936 goto map_skb; 3852 goto map_skb;
3937 } 3853 }
3938 3854
3939
3940 if (unlikely(!skb)) { 3855 if (unlikely(!skb)) {
3941 /* Better luck next round */ 3856 /* Better luck next round */
3942 adapter->alloc_rx_buff_failed++; 3857 adapter->alloc_rx_buff_failed++;
@@ -4242,7 +4157,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4242 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4157 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4243 return -EIO; 4158 return -EIO;
4244 } 4159 }
4245 if (adapter->hw.phy_type == e1000_phy_m88) { 4160 if (adapter->hw.media_type == e1000_media_type_copper) {
4246 switch (data->reg_num) { 4161 switch (data->reg_num) {
4247 case PHY_CTRL: 4162 case PHY_CTRL:
4248 if (mii_reg & MII_CR_POWER_DOWN) 4163 if (mii_reg & MII_CR_POWER_DOWN)
@@ -4258,8 +4173,8 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4258 else 4173 else
4259 spddplx = SPEED_10; 4174 spddplx = SPEED_10;
4260 spddplx += (mii_reg & 0x100) 4175 spddplx += (mii_reg & 0x100)
4261 ? FULL_DUPLEX : 4176 ? DUPLEX_FULL :
4262 HALF_DUPLEX; 4177 DUPLEX_HALF;
4263 retval = e1000_set_spd_dplx(adapter, 4178 retval = e1000_set_spd_dplx(adapter,
4264 spddplx); 4179 spddplx);
4265 if (retval) { 4180 if (retval) {
@@ -4489,8 +4404,8 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
4489} 4404}
4490 4405
4491#ifdef CONFIG_PM 4406#ifdef CONFIG_PM
4492/* these functions save and restore 16 or 64 dwords (64-256 bytes) of config 4407/* Save/restore 16 or 64 dwords of PCI config space depending on which
4493 * space versus the 64 bytes that pci_[save|restore]_state handle 4408 * bus we're on (PCI(X) vs. PCI-E)
4494 */ 4409 */
4495#define PCIE_CONFIG_SPACE_LEN 256 4410#define PCIE_CONFIG_SPACE_LEN 256
4496#define PCI_CONFIG_SPACE_LEN 64 4411#define PCI_CONFIG_SPACE_LEN 64
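
The reworded comment above explains why the driver carries its own e1000_pci_save_state(): pci_save_state() at the time covered only the first 64 bytes of config space, while PCI Express functions expose 256 bytes. A sketch of the idea, assuming a caller-supplied buffer (cfg_buf and is_pcie are hypothetical; the driver's real helper also handles allocation and error paths):

    /* Sketch: capture 64 or 256 bytes of config space, one dword at a time. */
    static void save_cfg_space(struct pci_dev *pdev, u32 *cfg_buf, int is_pcie)
    {
            int size = is_pcie ? 256 : 64;  /* bytes of config space to capture */
            int i;

            for (i = 0; i < size / 4; i++)
                    pci_read_config_dword(pdev, i * 4, &cfg_buf[i]);
    }
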
@@ -4500,6 +4415,7 @@ e1000_pci_save_state(struct e1000_adapter *adapter)
4500 struct pci_dev *dev = adapter->pdev; 4415 struct pci_dev *dev = adapter->pdev;
4501 int size; 4416 int size;
4502 int i; 4417 int i;
4418
4503 if (adapter->hw.mac_type >= e1000_82571) 4419 if (adapter->hw.mac_type >= e1000_82571)
4504 size = PCIE_CONFIG_SPACE_LEN; 4420 size = PCIE_CONFIG_SPACE_LEN;
4505 else 4421 else
@@ -4523,8 +4439,10 @@ e1000_pci_restore_state(struct e1000_adapter *adapter)
4523 struct pci_dev *dev = adapter->pdev; 4439 struct pci_dev *dev = adapter->pdev;
4524 int size; 4440 int size;
4525 int i; 4441 int i;
4442
4526 if (adapter->config_space == NULL) 4443 if (adapter->config_space == NULL)
4527 return; 4444 return;
4445
4528 if (adapter->hw.mac_type >= e1000_82571) 4446 if (adapter->hw.mac_type >= e1000_82571)
4529 size = PCIE_CONFIG_SPACE_LEN; 4447 size = PCIE_CONFIG_SPACE_LEN;
4530 else 4448 else
@@ -4552,8 +4470,8 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4552 e1000_down(adapter); 4470 e1000_down(adapter);
4553 4471
4554#ifdef CONFIG_PM 4472#ifdef CONFIG_PM
4555 /* implement our own version of pci_save_state(pdev) because pci 4473 /* Implement our own version of pci_save_state(pdev) because pci-
4556 * express adapters have larger 256 byte config spaces */ 4474 * express adapters have 256-byte config spaces. */
4557 retval = e1000_pci_save_state(adapter); 4475 retval = e1000_pci_save_state(adapter);
4558 if (retval) 4476 if (retval)
4559 return retval; 4477 return retval;
@@ -4610,7 +4528,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4610 retval = pci_enable_wake(pdev, PCI_D3hot, 0); 4528 retval = pci_enable_wake(pdev, PCI_D3hot, 0);
4611 if (retval) 4529 if (retval)
4612 DPRINTK(PROBE, ERR, "Error enabling D3 wake\n"); 4530 DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
4613 retval = pci_enable_wake(pdev, PCI_D3cold, 0); /* 4 == D3 cold */ 4531 retval = pci_enable_wake(pdev, PCI_D3cold, 0);
4614 if (retval) 4532 if (retval)
4615 DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n"); 4533 DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
4616 } 4534 }
@@ -4626,7 +4544,8 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4626 DPRINTK(PROBE, ERR, "Error enabling D3 wake\n"); 4544 DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
4627 retval = pci_enable_wake(pdev, PCI_D3cold, 1); 4545 retval = pci_enable_wake(pdev, PCI_D3cold, 1);
4628 if (retval) 4546 if (retval)
4629 DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n"); 4547 DPRINTK(PROBE, ERR,
4548 "Error enabling D3 cold wake\n");
4630 } 4549 }
4631 } 4550 }
4632 4551
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index 3768d83cd577..e0a4d37d1b85 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -268,7 +268,7 @@ e1000_validate_option(int *value, struct e1000_option *opt,
268 BUG(); 268 BUG();
269 } 269 }
270 270
271 DPRINTK(PROBE, INFO, "Invalid %s specified (%i) %s\n", 271 DPRINTK(PROBE, INFO, "Invalid %s value specified (%i) %s\n",
272 opt->name, *value, opt->err); 272 opt->name, *value, opt->err);
273 *value = opt->def; 273 *value = opt->def;
274 return -1; 274 return -1;
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
index 8c62ced2c9b2..467fc861360d 100644
--- a/drivers/net/eepro100.c
+++ b/drivers/net/eepro100.c
@@ -27,7 +27,7 @@
27 rx_align support: enables rx DMA without causing unaligned accesses. 27 rx_align support: enables rx DMA without causing unaligned accesses.
28*/ 28*/
29 29
30static const char *version = 30static const char * const version =
31"eepro100.c:v1.09j-t 9/29/99 Donald Becker http://www.scyld.com/network/eepro100.html\n" 31"eepro100.c:v1.09j-t 9/29/99 Donald Becker http://www.scyld.com/network/eepro100.html\n"
32"eepro100.c: $Revision: 1.36 $ 2000/11/17 Modified by Andrey V. Savochkin <saw@saw.sw.com.sg> and others\n"; 32"eepro100.c: $Revision: 1.36 $ 2000/11/17 Modified by Andrey V. Savochkin <saw@saw.sw.com.sg> and others\n";
33 33
@@ -469,7 +469,7 @@ static const char i82558_config_cmd[CONFIG_DATA_SIZE] = {
469 0x31, 0x05, }; 469 0x31, 0x05, };
470 470
471/* PHY media interface chips. */ 471/* PHY media interface chips. */
472static const char *phys[] = { 472static const char * const phys[] = {
473 "None", "i82553-A/B", "i82553-C", "i82503", 473 "None", "i82553-A/B", "i82553-C", "i82503",
474 "DP83840", "80c240", "80c24", "i82555", 474 "DP83840", "80c240", "80c24", "i82555",
475 "unknown-8", "unknown-9", "DP83840A", "unknown-11", 475 "unknown-8", "unknown-9", "DP83840A", "unknown-11",
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index f119ec4e89ea..2f7b86837fe8 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -225,7 +225,7 @@ struct epic_chip_info {
225 225
226 226
227/* indexed by chip_t */ 227/* indexed by chip_t */
228static struct epic_chip_info pci_id_tbl[] = { 228static const struct epic_chip_info pci_id_tbl[] = {
229 { "SMSC EPIC/100 83c170", 229 { "SMSC EPIC/100 83c170",
230 EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | NO_MII | MII_PWRDWN }, 230 EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | NO_MII | MII_PWRDWN },
231 { "SMSC EPIC/100 83c170", 231 { "SMSC EPIC/100 83c170",
@@ -291,7 +291,7 @@ enum CommandBits {
291 RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull) 291 RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull)
292#define EpicNormalEvent (0x0000ffff & ~EpicNapiEvent) 292#define EpicNormalEvent (0x0000ffff & ~EpicNapiEvent)
293 293
294static u16 media2miictl[16] = { 294static const u16 media2miictl[16] = {
295 0, 0x0C00, 0x0C00, 0x2000, 0x0100, 0x2100, 0, 0, 295 0, 0x0C00, 0x0C00, 0x2000, 0x0100, 0x2100, 0, 0,
296 0, 0, 0, 0, 0, 0, 0, 0 }; 296 0, 0, 0, 0, 0, 0, 0, 0 };
297 297
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c
index f32a6b3acb2a..b67545be2caa 100644
--- a/drivers/net/eth16i.c
+++ b/drivers/net/eth16i.c
@@ -161,6 +161,7 @@ static char *version =
161#include <linux/etherdevice.h> 161#include <linux/etherdevice.h>
162#include <linux/skbuff.h> 162#include <linux/skbuff.h>
163#include <linux/bitops.h> 163#include <linux/bitops.h>
164#include <linux/jiffies.h>
164 165
165#include <asm/system.h> 166#include <asm/system.h>
166#include <asm/io.h> 167#include <asm/io.h>
@@ -754,7 +755,7 @@ static void eth16i_set_port(int ioaddr, int porttype)
754 755
755static int eth16i_send_probe_packet(int ioaddr, unsigned char *b, int l) 756static int eth16i_send_probe_packet(int ioaddr, unsigned char *b, int l)
756{ 757{
757 int starttime; 758 unsigned long starttime;
758 759
759 outb(0xff, ioaddr + TX_STATUS_REG); 760 outb(0xff, ioaddr + TX_STATUS_REG);
760 761
@@ -765,7 +766,7 @@ static int eth16i_send_probe_packet(int ioaddr, unsigned char *b, int l)
765 outb(TX_START | 1, ioaddr + TRANSMIT_START_REG); 766 outb(TX_START | 1, ioaddr + TRANSMIT_START_REG);
766 767
767 while( (inb(ioaddr + TX_STATUS_REG) & 0x80) == 0) { 768 while( (inb(ioaddr + TX_STATUS_REG) & 0x80) == 0) {
768 if( (jiffies - starttime) > TX_TIMEOUT) { 769 if( time_after(jiffies, starttime + TX_TIMEOUT)) {
769 return -1; 770 return -1;
770 } 771 }
771 } 772 }
@@ -775,18 +776,18 @@ static int eth16i_send_probe_packet(int ioaddr, unsigned char *b, int l)
775 776
776static int eth16i_receive_probe_packet(int ioaddr) 777static int eth16i_receive_probe_packet(int ioaddr)
777{ 778{
778 int starttime; 779 unsigned long starttime;
779 780
780 starttime = jiffies; 781 starttime = jiffies;
781 782
782 while((inb(ioaddr + TX_STATUS_REG) & 0x20) == 0) { 783 while((inb(ioaddr + TX_STATUS_REG) & 0x20) == 0) {
783 if( (jiffies - starttime) > TX_TIMEOUT) { 784 if( time_after(jiffies, starttime + TX_TIMEOUT)) {
784 785
785 if(eth16i_debug > 1) 786 if(eth16i_debug > 1)
786 printk(KERN_DEBUG "Timeout occurred waiting transmit packet received\n"); 787 printk(KERN_DEBUG "Timeout occurred waiting transmit packet received\n");
787 starttime = jiffies; 788 starttime = jiffies;
788 while((inb(ioaddr + RX_STATUS_REG) & 0x80) == 0) { 789 while((inb(ioaddr + RX_STATUS_REG) & 0x80) == 0) {
789 if( (jiffies - starttime) > TX_TIMEOUT) { 790 if( time_after(jiffies, starttime + TX_TIMEOUT)) {
790 if(eth16i_debug > 1) 791 if(eth16i_debug > 1)
791 printk(KERN_DEBUG "Timeout occurred waiting receive packet\n"); 792 printk(KERN_DEBUG "Timeout occurred waiting receive packet\n");
792 return -1; 793 return -1;
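
The eth16i changes above replace open-coded "(jiffies - starttime) > TX_TIMEOUT" tests with time_after() and widen starttime to unsigned long; the macro performs the subtraction in a signed type, so the comparison stays correct when the jiffies counter wraps. A minimal illustration of the pattern, with a hypothetical poll_ready() condition and TX_TIMEOUT taken from the driver context:

    #include <linux/jiffies.h>

    unsigned long deadline = jiffies + TX_TIMEOUT;

    while (!poll_ready()) {
            if (time_after(jiffies, deadline))
                    return -1;      /* wrap-safe timeout check */
    }
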
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index 55dbe9a3fd56..a8449265e5fd 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -160,7 +160,7 @@ struct chip_info {
160 int flags; 160 int flags;
161}; 161};
162 162
163static struct chip_info skel_netdrv_tbl[] = { 163static const struct chip_info skel_netdrv_tbl[] = {
164 {"100/10M Ethernet PCI Adapter", 136, HAS_MII_XCVR}, 164 {"100/10M Ethernet PCI Adapter", 136, HAS_MII_XCVR},
165 {"100/10M Ethernet PCI Adapter", 136, HAS_CHIP_XCVR}, 165 {"100/10M Ethernet PCI Adapter", 136, HAS_CHIP_XCVR},
166 {"1000/100/10M Ethernet PCI Adapter", 136, HAS_MII_XCVR}, 166 {"1000/100/10M Ethernet PCI Adapter", 136, HAS_MII_XCVR},
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 3682ec61e8a8..e7fc28b07e5a 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -102,6 +102,9 @@
102 * 0.47: 26 Oct 2005: Add phyaddr 0 in phy scan. 102 * 0.47: 26 Oct 2005: Add phyaddr 0 in phy scan.
103 * 0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single 103 * 0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single
104 * 0.49: 10 Dec 2005: Fix tso for large buffers. 104 * 0.49: 10 Dec 2005: Fix tso for large buffers.
105 * 0.50: 20 Jan 2006: Add 8021pq tagging support.
106 * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
107 * 0.52: 20 Jan 2006: Add MSI/MSIX support.
105 * 108 *
106 * Known bugs: 109 * Known bugs:
107 * We suspect that on some hardware no TX done interrupts are generated. 110 * We suspect that on some hardware no TX done interrupts are generated.
@@ -113,7 +116,7 @@
113 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few 116 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
114 * superfluous timer interrupts from the nic. 117 * superfluous timer interrupts from the nic.
115 */ 118 */
116#define FORCEDETH_VERSION "0.49" 119#define FORCEDETH_VERSION "0.52"
117#define DRV_NAME "forcedeth" 120#define DRV_NAME "forcedeth"
118 121
119#include <linux/module.h> 122#include <linux/module.h>
@@ -153,6 +156,9 @@
153#define DEV_HAS_LARGEDESC 0x0004 /* device supports jumbo frames and needs packet format 2 */ 156#define DEV_HAS_LARGEDESC 0x0004 /* device supports jumbo frames and needs packet format 2 */
154#define DEV_HAS_HIGH_DMA 0x0008 /* device supports 64bit dma */ 157#define DEV_HAS_HIGH_DMA 0x0008 /* device supports 64bit dma */
155#define DEV_HAS_CHECKSUM 0x0010 /* device supports tx and rx checksum offloads */ 158#define DEV_HAS_CHECKSUM 0x0010 /* device supports tx and rx checksum offloads */
159#define DEV_HAS_VLAN 0x0020 /* device supports vlan tagging and striping */
160#define DEV_HAS_MSI 0x0040 /* device supports MSI */
161#define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */
156 162
157enum { 163enum {
158 NvRegIrqStatus = 0x000, 164 NvRegIrqStatus = 0x000,
@@ -166,14 +172,17 @@ enum {
166#define NVREG_IRQ_TX_OK 0x0010 172#define NVREG_IRQ_TX_OK 0x0010
167#define NVREG_IRQ_TIMER 0x0020 173#define NVREG_IRQ_TIMER 0x0020
168#define NVREG_IRQ_LINK 0x0040 174#define NVREG_IRQ_LINK 0x0040
169#define NVREG_IRQ_TX_ERROR 0x0080 175#define NVREG_IRQ_RX_FORCED 0x0080
170#define NVREG_IRQ_TX1 0x0100 176#define NVREG_IRQ_TX_FORCED 0x0100
171#define NVREG_IRQMASK_THROUGHPUT 0x00df 177#define NVREG_IRQMASK_THROUGHPUT 0x00df
172#define NVREG_IRQMASK_CPU 0x0040 178#define NVREG_IRQMASK_CPU 0x0040
179#define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
180#define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
181#define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK)
173 182
174#define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \ 183#define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
175 NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_TX_ERROR| \ 184 NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
176 NVREG_IRQ_TX1)) 185 NVREG_IRQ_TX_FORCED))
177 186
178 NvRegUnknownSetupReg6 = 0x008, 187 NvRegUnknownSetupReg6 = 0x008,
179#define NVREG_UNKSETUP6_VAL 3 188#define NVREG_UNKSETUP6_VAL 3
@@ -185,6 +194,10 @@ enum {
185 NvRegPollingInterval = 0x00c, 194 NvRegPollingInterval = 0x00c,
186#define NVREG_POLL_DEFAULT_THROUGHPUT 970 195#define NVREG_POLL_DEFAULT_THROUGHPUT 970
187#define NVREG_POLL_DEFAULT_CPU 13 196#define NVREG_POLL_DEFAULT_CPU 13
197 NvRegMSIMap0 = 0x020,
198 NvRegMSIMap1 = 0x024,
199 NvRegMSIIrqMask = 0x030,
200#define NVREG_MSI_VECTOR_0_ENABLED 0x01
188 NvRegMisc1 = 0x080, 201 NvRegMisc1 = 0x080,
189#define NVREG_MISC1_HD 0x02 202#define NVREG_MISC1_HD 0x02
190#define NVREG_MISC1_FORCE 0x3b0f3c 203#define NVREG_MISC1_FORCE 0x3b0f3c
@@ -254,6 +267,10 @@ enum {
254#define NVREG_TXRXCTL_DESC_1 0 267#define NVREG_TXRXCTL_DESC_1 0
255#define NVREG_TXRXCTL_DESC_2 0x02100 268#define NVREG_TXRXCTL_DESC_2 0x02100
256#define NVREG_TXRXCTL_DESC_3 0x02200 269#define NVREG_TXRXCTL_DESC_3 0x02200
270#define NVREG_TXRXCTL_VLANSTRIP 0x00040
271#define NVREG_TXRXCTL_VLANINS 0x00080
272 NvRegTxRingPhysAddrHigh = 0x148,
273 NvRegRxRingPhysAddrHigh = 0x14C,
257 NvRegMIIStatus = 0x180, 274 NvRegMIIStatus = 0x180,
258#define NVREG_MIISTAT_ERROR 0x0001 275#define NVREG_MIISTAT_ERROR 0x0001
259#define NVREG_MIISTAT_LINKCHANGE 0x0008 276#define NVREG_MIISTAT_LINKCHANGE 0x0008
@@ -303,6 +320,11 @@ enum {
303#define NVREG_POWERSTATE_D1 0x0001 320#define NVREG_POWERSTATE_D1 0x0001
304#define NVREG_POWERSTATE_D2 0x0002 321#define NVREG_POWERSTATE_D2 0x0002
305#define NVREG_POWERSTATE_D3 0x0003 322#define NVREG_POWERSTATE_D3 0x0003
323 NvRegVlanControl = 0x300,
324#define NVREG_VLANCONTROL_ENABLE 0x2000
325 NvRegMSIXMap0 = 0x3e0,
326 NvRegMSIXMap1 = 0x3e4,
327 NvRegMSIXIrqStatus = 0x3f0,
306}; 328};
307 329
308/* Big endian: should work, but is untested */ 330/* Big endian: should work, but is untested */
@@ -314,7 +336,7 @@ struct ring_desc {
314struct ring_desc_ex { 336struct ring_desc_ex {
315 u32 PacketBufferHigh; 337 u32 PacketBufferHigh;
316 u32 PacketBufferLow; 338 u32 PacketBufferLow;
317 u32 Reserved; 339 u32 TxVlan;
318 u32 FlagLen; 340 u32 FlagLen;
319}; 341};
320 342
@@ -355,6 +377,8 @@ typedef union _ring_type {
355#define NV_TX2_CHECKSUM_L3 (1<<27) 377#define NV_TX2_CHECKSUM_L3 (1<<27)
356#define NV_TX2_CHECKSUM_L4 (1<<26) 378#define NV_TX2_CHECKSUM_L4 (1<<26)
357 379
380#define NV_TX3_VLAN_TAG_PRESENT (1<<18)
381
358#define NV_RX_DESCRIPTORVALID (1<<16) 382#define NV_RX_DESCRIPTORVALID (1<<16)
359#define NV_RX_MISSEDFRAME (1<<17) 383#define NV_RX_MISSEDFRAME (1<<17)
360#define NV_RX_SUBSTRACT1 (1<<18) 384#define NV_RX_SUBSTRACT1 (1<<18)
@@ -385,6 +409,9 @@ typedef union _ring_type {
385#define NV_RX2_ERROR (1<<30) 409#define NV_RX2_ERROR (1<<30)
386#define NV_RX2_AVAIL (1<<31) 410#define NV_RX2_AVAIL (1<<31)
387 411
412#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
413#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF)
414
388/* Miscelaneous hardware related defines: */ 415/* Miscelaneous hardware related defines: */
389#define NV_PCI_REGSZ 0x270 416#define NV_PCI_REGSZ 0x270
390 417
@@ -475,6 +502,18 @@ typedef union _ring_type {
475#define LPA_1000FULL 0x0800 502#define LPA_1000FULL 0x0800
476#define LPA_1000HALF 0x0400 503#define LPA_1000HALF 0x0400
477 504
505/* MSI/MSI-X defines */
506#define NV_MSI_X_MAX_VECTORS 8
507#define NV_MSI_X_VECTORS_MASK 0x000f
508#define NV_MSI_CAPABLE 0x0010
509#define NV_MSI_X_CAPABLE 0x0020
510#define NV_MSI_ENABLED 0x0040
511#define NV_MSI_X_ENABLED 0x0080
512
513#define NV_MSI_X_VECTOR_ALL 0x0
514#define NV_MSI_X_VECTOR_RX 0x0
515#define NV_MSI_X_VECTOR_TX 0x1
516#define NV_MSI_X_VECTOR_OTHER 0x2
478 517
479/* 518/*
480 * SMP locking: 519 * SMP locking:
@@ -511,6 +550,7 @@ struct fe_priv {
511 u32 irqmask; 550 u32 irqmask;
512 u32 desc_ver; 551 u32 desc_ver;
513 u32 txrxctl_bits; 552 u32 txrxctl_bits;
553 u32 vlanctl_bits;
514 554
515 void __iomem *base; 555 void __iomem *base;
516 556
@@ -525,6 +565,7 @@ struct fe_priv {
525 unsigned int pkt_limit; 565 unsigned int pkt_limit;
526 struct timer_list oom_kick; 566 struct timer_list oom_kick;
527 struct timer_list nic_poll; 567 struct timer_list nic_poll;
568 u32 nic_poll_irq;
528 569
529 /* media detection workaround. 570 /* media detection workaround.
530 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock); 571 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
@@ -540,6 +581,13 @@ struct fe_priv {
540 dma_addr_t tx_dma[TX_RING]; 581 dma_addr_t tx_dma[TX_RING];
541 unsigned int tx_dma_len[TX_RING]; 582 unsigned int tx_dma_len[TX_RING];
542 u32 tx_flags; 583 u32 tx_flags;
584
585 /* vlan fields */
586 struct vlan_group *vlangrp;
587
588 /* msi/msi-x fields */
589 u32 msi_flags;
590 struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
543}; 591};
544 592
545/* 593/*
@@ -567,6 +615,16 @@ static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
567 */ 615 */
568static int poll_interval = -1; 616static int poll_interval = -1;
569 617
618/*
619 * Disable MSI interrupts
620 */
621static int disable_msi = 0;
622
623/*
624 * Disable MSIX interrupts
625 */
626static int disable_msix = 0;
627
570static inline struct fe_priv *get_nvpriv(struct net_device *dev) 628static inline struct fe_priv *get_nvpriv(struct net_device *dev)
571{ 629{
572 return netdev_priv(dev); 630 return netdev_priv(dev);
@@ -612,6 +670,33 @@ static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
612 return 0; 670 return 0;
613} 671}
614 672
673#define NV_SETUP_RX_RING 0x01
674#define NV_SETUP_TX_RING 0x02
675
676static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
677{
678 struct fe_priv *np = get_nvpriv(dev);
679 u8 __iomem *base = get_hwbase(dev);
680
681 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
682 if (rxtx_flags & NV_SETUP_RX_RING) {
683 writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
684 }
685 if (rxtx_flags & NV_SETUP_TX_RING) {
686 writel((u32) cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
687 }
688 } else {
689 if (rxtx_flags & NV_SETUP_RX_RING) {
690 writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
691 writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh);
692 }
693 if (rxtx_flags & NV_SETUP_TX_RING) {
694 writel((u32) cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
695 writel((u32) (cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh);
696 }
697 }
698}
699
615#define MII_READ (-1) 700#define MII_READ (-1)
616/* mii_rw: read/write a register on the PHY. 701/* mii_rw: read/write a register on the PHY.
617 * 702 *
@@ -903,14 +988,27 @@ static void nv_do_rx_refill(unsigned long data)
903 struct net_device *dev = (struct net_device *) data; 988 struct net_device *dev = (struct net_device *) data;
904 struct fe_priv *np = netdev_priv(dev); 989 struct fe_priv *np = netdev_priv(dev);
905 990
906 disable_irq(dev->irq); 991
992 if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
993 ((np->msi_flags & NV_MSI_X_ENABLED) &&
994 ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
995 disable_irq(dev->irq);
996 } else {
997 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
998 }
907 if (nv_alloc_rx(dev)) { 999 if (nv_alloc_rx(dev)) {
908 spin_lock(&np->lock); 1000 spin_lock(&np->lock);
909 if (!np->in_shutdown) 1001 if (!np->in_shutdown)
910 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 1002 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
911 spin_unlock(&np->lock); 1003 spin_unlock(&np->lock);
912 } 1004 }
913 enable_irq(dev->irq); 1005 if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
1006 ((np->msi_flags & NV_MSI_X_ENABLED) &&
1007 ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
1008 enable_irq(dev->irq);
1009 } else {
1010 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1011 }
914} 1012}
915 1013
916static void nv_init_rx(struct net_device *dev) 1014static void nv_init_rx(struct net_device *dev)
@@ -965,7 +1063,7 @@ static int nv_release_txskb(struct net_device *dev, unsigned int skbnr)
965 } 1063 }
966 1064
967 if (np->tx_skbuff[skbnr]) { 1065 if (np->tx_skbuff[skbnr]) {
968 dev_kfree_skb_irq(np->tx_skbuff[skbnr]); 1066 dev_kfree_skb_any(np->tx_skbuff[skbnr]);
969 np->tx_skbuff[skbnr] = NULL; 1067 np->tx_skbuff[skbnr] = NULL;
970 return 1; 1068 return 1;
971 } else { 1069 } else {
@@ -1031,6 +1129,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1031 u32 bcnt; 1129 u32 bcnt;
1032 u32 size = skb->len-skb->data_len; 1130 u32 size = skb->len-skb->data_len;
1033 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 1131 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
1132 u32 tx_flags_vlan = 0;
1034 1133
1035 /* add fragments to entries count */ 1134 /* add fragments to entries count */
1036 for (i = 0; i < fragments; i++) { 1135 for (i = 0; i < fragments; i++) {
@@ -1111,10 +1210,16 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1111#endif 1210#endif
1112 tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0); 1211 tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
1113 1212
1213 /* vlan tag */
1214 if (np->vlangrp && vlan_tx_tag_present(skb)) {
1215 tx_flags_vlan = NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb);
1216 }
1217
1114 /* set tx flags */ 1218 /* set tx flags */
1115 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1219 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1116 np->tx_ring.orig[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra); 1220 np->tx_ring.orig[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
1117 } else { 1221 } else {
1222 np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan);
1118 np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra); 1223 np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
1119 } 1224 }
1120 1225
@@ -1209,9 +1314,14 @@ static void nv_tx_timeout(struct net_device *dev)
1209{ 1314{
1210 struct fe_priv *np = netdev_priv(dev); 1315 struct fe_priv *np = netdev_priv(dev);
1211 u8 __iomem *base = get_hwbase(dev); 1316 u8 __iomem *base = get_hwbase(dev);
1317 u32 status;
1318
1319 if (np->msi_flags & NV_MSI_X_ENABLED)
1320 status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
1321 else
1322 status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
1212 1323
1213 printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, 1324 printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);
1214 readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK);
1215 1325
1216 { 1326 {
1217 int i; 1327 int i;
@@ -1273,10 +1383,7 @@ static void nv_tx_timeout(struct net_device *dev)
1273 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name); 1383 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
1274 nv_drain_tx(dev); 1384 nv_drain_tx(dev);
1275 np->next_tx = np->nic_tx = 0; 1385 np->next_tx = np->nic_tx = 0;
1276 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1386 setup_hw_rings(dev, NV_SETUP_TX_RING);
1277 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
1278 else
1279 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
1280 netif_wake_queue(dev); 1387 netif_wake_queue(dev);
1281 } 1388 }
1282 1389
@@ -1342,6 +1449,8 @@ static void nv_rx_process(struct net_device *dev)
1342{ 1449{
1343 struct fe_priv *np = netdev_priv(dev); 1450 struct fe_priv *np = netdev_priv(dev);
1344 u32 Flags; 1451 u32 Flags;
1452 u32 vlanflags = 0;
1453
1345 1454
1346 for (;;) { 1455 for (;;) {
1347 struct sk_buff *skb; 1456 struct sk_buff *skb;
@@ -1357,6 +1466,7 @@ static void nv_rx_process(struct net_device *dev)
1357 } else { 1466 } else {
1358 Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen); 1467 Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen);
1359 len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver); 1468 len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
1469 vlanflags = le32_to_cpu(np->rx_ring.ex[i].PacketBufferLow);
1360 } 1470 }
1361 1471
1362 dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n", 1472 dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n",
@@ -1474,7 +1584,11 @@ static void nv_rx_process(struct net_device *dev)
1474 skb->protocol = eth_type_trans(skb, dev); 1584 skb->protocol = eth_type_trans(skb, dev);
1475 dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n", 1585 dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n",
1476 dev->name, np->cur_rx, len, skb->protocol); 1586 dev->name, np->cur_rx, len, skb->protocol);
1477 netif_rx(skb); 1587 if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) {
1588 vlan_hwaccel_rx(skb, np->vlangrp, vlanflags & NV_RX3_VLAN_TAG_MASK);
1589 } else {
1590 netif_rx(skb);
1591 }
1478 dev->last_rx = jiffies; 1592 dev->last_rx = jiffies;
1479 np->stats.rx_packets++; 1593 np->stats.rx_packets++;
1480 np->stats.rx_bytes += len; 1594 np->stats.rx_bytes += len;
@@ -1523,7 +1637,15 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
1523 * guessed, there is probably a simpler approach. 1637 * guessed, there is probably a simpler approach.
1524 * Changing the MTU is a rare event, it shouldn't matter. 1638 * Changing the MTU is a rare event, it shouldn't matter.
1525 */ 1639 */
1526 disable_irq(dev->irq); 1640 if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
1641 ((np->msi_flags & NV_MSI_X_ENABLED) &&
1642 ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
1643 disable_irq(dev->irq);
1644 } else {
1645 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1646 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
1647 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
1648 }
1527 spin_lock_bh(&dev->xmit_lock); 1649 spin_lock_bh(&dev->xmit_lock);
1528 spin_lock(&np->lock); 1650 spin_lock(&np->lock);
1529 /* stop engines */ 1651 /* stop engines */
@@ -1544,11 +1666,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
1544 } 1666 }
1545 /* reinit nic view of the rx queue */ 1667 /* reinit nic view of the rx queue */
1546 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 1668 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
1547 writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr); 1669 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
1548 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1549 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
1550 else
1551 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
1552 writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT), 1670 writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
1553 base + NvRegRingSizes); 1671 base + NvRegRingSizes);
1554 pci_push(base); 1672 pci_push(base);
@@ -1560,7 +1678,15 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
1560 nv_start_tx(dev); 1678 nv_start_tx(dev);
1561 spin_unlock(&np->lock); 1679 spin_unlock(&np->lock);
1562 spin_unlock_bh(&dev->xmit_lock); 1680 spin_unlock_bh(&dev->xmit_lock);
1563 enable_irq(dev->irq); 1681 if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
1682 ((np->msi_flags & NV_MSI_X_ENABLED) &&
1683 ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
1684 enable_irq(dev->irq);
1685 } else {
1686 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1687 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
1688 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
1689 }
1564 } 1690 }
1565 return 0; 1691 return 0;
1566} 1692}
@@ -1866,8 +1992,13 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
1866 dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name); 1992 dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
1867 1993
1868 for (i=0; ; i++) { 1994 for (i=0; ; i++) {
1869 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 1995 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
1870 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 1996 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
1997 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
1998 } else {
1999 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2000 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
2001 }
1871 pci_push(base); 2002 pci_push(base);
1872 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 2003 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
1873 if (!(events & np->irqmask)) 2004 if (!(events & np->irqmask))
@@ -1907,11 +2038,16 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
1907 if (i > max_interrupt_work) { 2038 if (i > max_interrupt_work) {
1908 spin_lock(&np->lock); 2039 spin_lock(&np->lock);
1909 /* disable interrupts on the nic */ 2040 /* disable interrupts on the nic */
1910 writel(0, base + NvRegIrqMask); 2041 if (!(np->msi_flags & NV_MSI_X_ENABLED))
2042 writel(0, base + NvRegIrqMask);
2043 else
2044 writel(np->irqmask, base + NvRegIrqMask);
1911 pci_push(base); 2045 pci_push(base);
1912 2046
1913 if (!np->in_shutdown) 2047 if (!np->in_shutdown) {
2048 np->nic_poll_irq = np->irqmask;
1914 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 2049 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
2050 }
1915 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i); 2051 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
1916 spin_unlock(&np->lock); 2052 spin_unlock(&np->lock);
1917 break; 2053 break;
@@ -1923,22 +2059,212 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
1923 return IRQ_RETVAL(i); 2059 return IRQ_RETVAL(i);
1924} 2060}
1925 2061
2062static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
2063{
2064 struct net_device *dev = (struct net_device *) data;
2065 struct fe_priv *np = netdev_priv(dev);
2066 u8 __iomem *base = get_hwbase(dev);
2067 u32 events;
2068 int i;
2069
2070 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
2071
2072 for (i=0; ; i++) {
2073 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
2074 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
2075 pci_push(base);
2076 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
2077 if (!(events & np->irqmask))
2078 break;
2079
2080 spin_lock(&np->lock);
2081 nv_tx_done(dev);
2082 spin_unlock(&np->lock);
2083
2084 if (events & (NVREG_IRQ_TX_ERR)) {
2085 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
2086 dev->name, events);
2087 }
2088 if (i > max_interrupt_work) {
2089 spin_lock(&np->lock);
2090 /* disable interrupts on the nic */
2091 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
2092 pci_push(base);
2093
2094 if (!np->in_shutdown) {
2095 np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
2096 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
2097 }
2098 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
2099 spin_unlock(&np->lock);
2100 break;
2101 }
2102
2103 }
2104 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);
2105
2106 return IRQ_RETVAL(i);
2107}
2108
2109static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
2110{
2111 struct net_device *dev = (struct net_device *) data;
2112 struct fe_priv *np = netdev_priv(dev);
2113 u8 __iomem *base = get_hwbase(dev);
2114 u32 events;
2115 int i;
2116
2117 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
2118
2119 for (i=0; ; i++) {
2120 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
2121 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
2122 pci_push(base);
2123 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
2124 if (!(events & np->irqmask))
2125 break;
2126
2127 nv_rx_process(dev);
2128 if (nv_alloc_rx(dev)) {
2129 spin_lock(&np->lock);
2130 if (!np->in_shutdown)
2131 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2132 spin_unlock(&np->lock);
2133 }
2134
2135 if (i > max_interrupt_work) {
2136 spin_lock(&np->lock);
2137 /* disable interrupts on the nic */
2138 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
2139 pci_push(base);
2140
2141 if (!np->in_shutdown) {
2142 np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
2143 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
2144 }
2145 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
2146 spin_unlock(&np->lock);
2147 break;
2148 }
2149
2150 }
2151 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);
2152
2153 return IRQ_RETVAL(i);
2154}
2155
2156static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
2157{
2158 struct net_device *dev = (struct net_device *) data;
2159 struct fe_priv *np = netdev_priv(dev);
2160 u8 __iomem *base = get_hwbase(dev);
2161 u32 events;
2162 int i;
2163
2164 dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
2165
2166 for (i=0; ; i++) {
2167 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
2168 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
2169 pci_push(base);
2170 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
2171 if (!(events & np->irqmask))
2172 break;
2173
2174 if (events & NVREG_IRQ_LINK) {
2175 spin_lock(&np->lock);
2176 nv_link_irq(dev);
2177 spin_unlock(&np->lock);
2178 }
2179 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
2180 spin_lock(&np->lock);
2181 nv_linkchange(dev);
2182 spin_unlock(&np->lock);
2183 np->link_timeout = jiffies + LINK_TIMEOUT;
2184 }
2185 if (events & (NVREG_IRQ_UNKNOWN)) {
2186 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
2187 dev->name, events);
2188 }
2189 if (i > max_interrupt_work) {
2190 spin_lock(&np->lock);
2191 /* disable interrupts on the nic */
2192 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
2193 pci_push(base);
2194
2195 if (!np->in_shutdown) {
2196 np->nic_poll_irq |= NVREG_IRQ_OTHER;
2197 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
2198 }
2199 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
2200 spin_unlock(&np->lock);
2201 break;
2202 }
2203
2204 }
2205 dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);
2206
2207 return IRQ_RETVAL(i);
2208}
2209
1926static void nv_do_nic_poll(unsigned long data) 2210static void nv_do_nic_poll(unsigned long data)
1927{ 2211{
1928 struct net_device *dev = (struct net_device *) data; 2212 struct net_device *dev = (struct net_device *) data;
1929 struct fe_priv *np = netdev_priv(dev); 2213 struct fe_priv *np = netdev_priv(dev);
1930 u8 __iomem *base = get_hwbase(dev); 2214 u8 __iomem *base = get_hwbase(dev);
2215 u32 mask = 0;
1931 2216
1932 disable_irq(dev->irq);
1933 /* FIXME: Do we need synchronize_irq(dev->irq) here? */
1934 /* 2217 /*
2218 * First disable irq(s) and then
1935 * reenable interrupts on the nic, we have to do this before calling 2219 * reenable interrupts on the nic, we have to do this before calling
1936 * nv_nic_irq because that may decide to do otherwise 2220 * nv_nic_irq because that may decide to do otherwise
1937 */ 2221 */
1938 writel(np->irqmask, base + NvRegIrqMask); 2222
2223 if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
2224 ((np->msi_flags & NV_MSI_X_ENABLED) &&
2225 ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
2226 disable_irq(dev->irq);
2227 mask = np->irqmask;
2228 } else {
2229 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
2230 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
2231 mask |= NVREG_IRQ_RX_ALL;
2232 }
2233 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
2234 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
2235 mask |= NVREG_IRQ_TX_ALL;
2236 }
2237 if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
2238 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
2239 mask |= NVREG_IRQ_OTHER;
2240 }
2241 }
2242 np->nic_poll_irq = 0;
2243
2244 /* FIXME: Do we need synchronize_irq(dev->irq) here? */
2245
2246 writel(mask, base + NvRegIrqMask);
1939 pci_push(base); 2247 pci_push(base);
1940 nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL); 2248
1941 enable_irq(dev->irq); 2249 if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
2250 ((np->msi_flags & NV_MSI_X_ENABLED) &&
2251 ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
2252 nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
2253 enable_irq(dev->irq);
2254 } else {
2255 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
2256 nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL);
2257 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
2258 }
2259 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
2260 nv_nic_irq_tx((int) 0, (void *) data, (struct pt_regs *) NULL);
2261 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
2262 }
2263 if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
2264 nv_nic_irq_other((int) 0, (void *) data, (struct pt_regs *) NULL);
2265 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
2266 }
2267 }
1942} 2268}
1943 2269
1944#ifdef CONFIG_NET_POLL_CONTROLLER 2270#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -2217,11 +2543,66 @@ static struct ethtool_ops ops = {
2217 .get_perm_addr = ethtool_op_get_perm_addr, 2543 .get_perm_addr = ethtool_op_get_perm_addr,
2218}; 2544};
2219 2545
2546static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2547{
2548 struct fe_priv *np = get_nvpriv(dev);
2549
2550 spin_lock_irq(&np->lock);
2551
2552 /* save vlan group */
2553 np->vlangrp = grp;
2554
2555 if (grp) {
2556 /* enable vlan on MAC */
2557 np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
2558 } else {
2559 /* disable vlan on MAC */
2560 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
2561 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
2562 }
2563
2564 writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2565
2566 spin_unlock_irq(&np->lock);
2567};
2568
2569static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2570{
2571 /* nothing to do */
2572};
2573
2574static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
2575{
2576 u8 __iomem *base = get_hwbase(dev);
2577 int i;
2578 u32 msixmap = 0;
2579
2580 /* Each interrupt bit can be mapped to a MSIX vector (4 bits).
2581 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
2582 * the remaining 8 interrupts.
2583 */
2584 for (i = 0; i < 8; i++) {
2585 if ((irqmask >> i) & 0x1) {
2586 msixmap |= vector << (i << 2);
2587 }
2588 }
2589 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
2590
2591 msixmap = 0;
2592 for (i = 0; i < 8; i++) {
2593 if ((irqmask >> (i + 8)) & 0x1) {
2594 msixmap |= vector << (i << 2);
2595 }
2596 }
2597 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
2598}
2599
2220static int nv_open(struct net_device *dev) 2600static int nv_open(struct net_device *dev)
2221{ 2601{
2222 struct fe_priv *np = netdev_priv(dev); 2602 struct fe_priv *np = netdev_priv(dev);
2223 u8 __iomem *base = get_hwbase(dev); 2603 u8 __iomem *base = get_hwbase(dev);
2224 int ret, oom, i; 2604 int ret = 1;
2605 int oom, i;
2225 2606
2226 dprintk(KERN_DEBUG "nv_open: begin\n"); 2607 dprintk(KERN_DEBUG "nv_open: begin\n");
2227 2608
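
set_msix_vector_map(), added earlier in this hunk, packs a 4-bit MSI-X vector number into NvRegMSIXMap0/1 for every interrupt-status bit selected by irqmask: map register 0 holds nibbles for status bits 0-7, map register 1 for bits 8-15. A worked sketch of the nibble packing alone (register writes omitted), assuming the same bit layout as the patch:

    /* Sketch: build the 32-bit map word covering status bits 0..7. */
    u32 msixmap = 0;
    int bit;

    for (bit = 0; bit < 8; bit++)
            if (irqmask & (1 << bit))
                    msixmap |= vector << (bit * 4); /* one nibble per status bit */
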
@@ -2253,11 +2634,7 @@ static int nv_open(struct net_device *dev)
2253 nv_copy_mac_to_hw(dev); 2634 nv_copy_mac_to_hw(dev);
2254 2635
2255 /* 4) give hw rings */ 2636 /* 4) give hw rings */
2256 writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr); 2637 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
2257 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2258 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
2259 else
2260 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
2261 writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT), 2638 writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
2262 base + NvRegRingSizes); 2639 base + NvRegRingSizes);
2263 2640
@@ -2265,6 +2642,7 @@ static int nv_open(struct net_device *dev)
2265 writel(np->linkspeed, base + NvRegLinkSpeed); 2642 writel(np->linkspeed, base + NvRegLinkSpeed);
2266 writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3); 2643 writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3);
2267 writel(np->txrxctl_bits, base + NvRegTxRxControl); 2644 writel(np->txrxctl_bits, base + NvRegTxRxControl);
2645 writel(np->vlanctl_bits, base + NvRegVlanControl);
2268 pci_push(base); 2646 pci_push(base);
2269 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl); 2647 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
2270 reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31, 2648 reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
@@ -2315,9 +2693,77 @@ static int nv_open(struct net_device *dev)
2315 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 2693 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
2316 pci_push(base); 2694 pci_push(base);
2317 2695
2318 ret = request_irq(dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev); 2696 if (np->msi_flags & NV_MSI_X_CAPABLE) {
2319 if (ret) 2697 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
2320 goto out_drain; 2698 np->msi_x_entry[i].entry = i;
2699 }
2700 if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
2701 np->msi_flags |= NV_MSI_X_ENABLED;
2702 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
2703 /* Request irq for rx handling */
2704 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
2705 printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
2706 pci_disable_msix(np->pci_dev);
2707 np->msi_flags &= ~NV_MSI_X_ENABLED;
2708 goto out_drain;
2709 }
2710 /* Request irq for tx handling */
2711 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
2712 printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
2713 pci_disable_msix(np->pci_dev);
2714 np->msi_flags &= ~NV_MSI_X_ENABLED;
2715 goto out_drain;
2716 }
2717 /* Request irq for link and timer handling */
2718 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
2719 printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
2720 pci_disable_msix(np->pci_dev);
2721 np->msi_flags &= ~NV_MSI_X_ENABLED;
2722 goto out_drain;
2723 }
2724
2725 /* map interrupts to their respective vector */
2726 writel(0, base + NvRegMSIXMap0);
2727 writel(0, base + NvRegMSIXMap1);
2728 set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
2729 set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
2730 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
2731 } else {
2732 /* Request irq for all interrupts */
2733 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
2734 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
2735 pci_disable_msix(np->pci_dev);
2736 np->msi_flags &= ~NV_MSI_X_ENABLED;
2737 goto out_drain;
2738 }
2739
2740 /* map interrupts to vector 0 */
2741 writel(0, base + NvRegMSIXMap0);
2742 writel(0, base + NvRegMSIXMap1);
2743 }
2744 }
2745 }
2746 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
2747 if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
2748 np->msi_flags |= NV_MSI_ENABLED;
2749 if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
2750 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
2751 pci_disable_msi(np->pci_dev);
2752 np->msi_flags &= ~NV_MSI_ENABLED;
2753 goto out_drain;
2754 }
2755
2756 /* map interrupts to vector 0 */
2757 writel(0, base + NvRegMSIMap0);
2758 writel(0, base + NvRegMSIMap1);
2759 /* enable msi vector 0 */
2760 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
2761 }
2762 }
2763 if (ret != 0) {
2764 if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0)
2765 goto out_drain;
2766 }
2321 2767
2322 /* ask for interrupts */ 2768 /* ask for interrupts */
2323 writel(np->irqmask, base + NvRegIrqMask); 2769 writel(np->irqmask, base + NvRegIrqMask);
@@ -2364,6 +2810,7 @@ static int nv_close(struct net_device *dev)
2364{ 2810{
2365 struct fe_priv *np = netdev_priv(dev); 2811 struct fe_priv *np = netdev_priv(dev);
2366 u8 __iomem *base; 2812 u8 __iomem *base;
2813 int i;
2367 2814
2368 spin_lock_irq(&np->lock); 2815 spin_lock_irq(&np->lock);
2369 np->in_shutdown = 1; 2816 np->in_shutdown = 1;
@@ -2381,13 +2828,31 @@ static int nv_close(struct net_device *dev)
2381 2828
2382 /* disable interrupts on the nic or we will lock up */ 2829 /* disable interrupts on the nic or we will lock up */
2383 base = get_hwbase(dev); 2830 base = get_hwbase(dev);
2384 writel(0, base + NvRegIrqMask); 2831 if (np->msi_flags & NV_MSI_X_ENABLED) {
2832 writel(np->irqmask, base + NvRegIrqMask);
2833 } else {
2834 if (np->msi_flags & NV_MSI_ENABLED)
2835 writel(0, base + NvRegMSIIrqMask);
2836 writel(0, base + NvRegIrqMask);
2837 }
2385 pci_push(base); 2838 pci_push(base);
2386 dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name); 2839 dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
2387 2840
2388 spin_unlock_irq(&np->lock); 2841 spin_unlock_irq(&np->lock);
2389 2842
2390 free_irq(dev->irq, dev); 2843 if (np->msi_flags & NV_MSI_X_ENABLED) {
2844 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
2845 free_irq(np->msi_x_entry[i].vector, dev);
2846 }
2847 pci_disable_msix(np->pci_dev);
2848 np->msi_flags &= ~NV_MSI_X_ENABLED;
2849 } else {
2850 free_irq(np->pci_dev->irq, dev);
2851 if (np->msi_flags & NV_MSI_ENABLED) {
2852 pci_disable_msi(np->pci_dev);
2853 np->msi_flags &= ~NV_MSI_ENABLED;
2854 }
2855 }
2391 2856
2392 drain_ring(dev); 2857 drain_ring(dev);
2393 2858
@@ -2471,7 +2936,14 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
2471 printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n", 2936 printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
2472 pci_name(pci_dev)); 2937 pci_name(pci_dev));
2473 } else { 2938 } else {
2474 dev->features |= NETIF_F_HIGHDMA; 2939 if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
2940 printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
2941 pci_name(pci_dev));
2942 goto out_relreg;
2943 } else {
2944 dev->features |= NETIF_F_HIGHDMA;
2945 printk(KERN_INFO "forcedeth: using HIGHDMA\n");
2946 }
2475 } 2947 }
2476 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; 2948 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
2477 } else if (id->driver_data & DEV_HAS_LARGEDESC) { 2949 } else if (id->driver_data & DEV_HAS_LARGEDESC) {
@@ -2496,6 +2968,22 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
2496#endif 2968#endif
2497 } 2969 }
2498 2970
2971 np->vlanctl_bits = 0;
2972 if (id->driver_data & DEV_HAS_VLAN) {
2973 np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
2974 dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
2975 dev->vlan_rx_register = nv_vlan_rx_register;
2976 dev->vlan_rx_kill_vid = nv_vlan_rx_kill_vid;
2977 }
2978
2979 np->msi_flags = 0;
2980 if ((id->driver_data & DEV_HAS_MSI) && !disable_msi) {
2981 np->msi_flags |= NV_MSI_CAPABLE;
2982 }
2983 if ((id->driver_data & DEV_HAS_MSI_X) && !disable_msix) {
2984 np->msi_flags |= NV_MSI_X_CAPABLE;
2985 }
2986
2499 err = -ENOMEM; 2987 err = -ENOMEM;
2500 np->base = ioremap(addr, NV_PCI_REGSZ); 2988 np->base = ioremap(addr, NV_PCI_REGSZ);
2501 if (!np->base) 2989 if (!np->base)
@@ -2578,10 +3066,15 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
2578 } else { 3066 } else {
2579 np->tx_flags = NV_TX2_VALID; 3067 np->tx_flags = NV_TX2_VALID;
2580 } 3068 }
2581 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) 3069 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
2582 np->irqmask = NVREG_IRQMASK_THROUGHPUT; 3070 np->irqmask = NVREG_IRQMASK_THROUGHPUT;
2583 else 3071 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
3072 np->msi_flags |= 0x0003;
3073 } else {
2584 np->irqmask = NVREG_IRQMASK_CPU; 3074 np->irqmask = NVREG_IRQMASK_CPU;
3075 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
3076 np->msi_flags |= 0x0001;
3077 }
2585 3078
2586 if (id->driver_data & DEV_NEED_TIMERIRQ) 3079 if (id->driver_data & DEV_NEED_TIMERIRQ)
2587 np->irqmask |= NVREG_IRQ_TIMER; 3080 np->irqmask |= NVREG_IRQ_TIMER;
@@ -2737,11 +3230,11 @@ static struct pci_device_id pci_tbl[] = {
2737 }, 3230 },
2738 { /* MCP55 Ethernet Controller */ 3231 { /* MCP55 Ethernet Controller */
2739 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14), 3232 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
2740 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, 3233 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X,
2741 }, 3234 },
2742 { /* MCP55 Ethernet Controller */ 3235 { /* MCP55 Ethernet Controller */
2743 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15), 3236 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
2744 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, 3237 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X,
2745 }, 3238 },
2746 {0,}, 3239 {0,},
2747}; 3240};
@@ -2771,6 +3264,10 @@ module_param(optimization_mode, int, 0);
2771MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer."); 3264MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
2772module_param(poll_interval, int, 0); 3265module_param(poll_interval, int, 0);
2773MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535."); 3266MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
3267module_param(disable_msi, int, 0);
3268MODULE_PARM_DESC(disable_msi, "Disable MSI interrupts by setting to 1.");
3269module_param(disable_msix, int, 0);
3270MODULE_PARM_DESC(disable_msix, "Disable MSIX interrupts by setting to 1.");
2774 3271
2775MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>"); 3272MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
2776MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver"); 3273MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
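
The two new options follow the standard module-parameter pattern: a file-scope variable, module_param() to expose it, and MODULE_PARM_DESC() so it shows up in modinfo. A minimal sketch of that pattern in isolation (only the disable_msi half, everything else omitted):

#include <linux/module.h>
#include <linux/moduleparam.h>

static int disable_msi = 0;
module_param(disable_msi, int, 0);	/* third argument: sysfs mode, 0 = not exported */
MODULE_PARM_DESC(disable_msi, "Disable MSI interrupts by setting to 1.");

At load time the option is passed on the command line, e.g. "modprobe forcedeth disable_msi=1", which makes the probe code above skip setting the MSI capability flag.
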
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index bc9a3bf8d560..0ea4cb4a0d80 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -427,7 +427,7 @@ that case.
427static void hamachi_timer(unsigned long data); 427static void hamachi_timer(unsigned long data);
428 428
429enum capability_flags {CanHaveMII=1, }; 429enum capability_flags {CanHaveMII=1, };
430static struct chip_info { 430static const struct chip_info {
431 u16 vendor_id, device_id, device_id_mask, pad; 431 u16 vendor_id, device_id, device_id_mask, pad;
432 const char *name; 432 const char *name;
433 void (*media_timer)(unsigned long data); 433 void (*media_timer)(unsigned long data);
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index e4188d082f01..9220de9f4fe7 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -905,7 +905,7 @@ static int epp_open(struct net_device *dev)
905 /* autoprobe baud rate */ 905 /* autoprobe baud rate */
906 tstart = jiffies; 906 tstart = jiffies;
907 i = 0; 907 i = 0;
908 while ((signed)(jiffies-tstart-HZ/3) < 0) { 908 while (time_before(jiffies, tstart + HZ/3)) {
909 if (pp->ops->epp_read_addr(pp, &stat, 1, 0) != 1) 909 if (pp->ops->epp_read_addr(pp, &stat, 1, 0) != 1)
910 goto epptimeout; 910 goto epptimeout;
911 if ((stat & (EPP_NRAEF|EPP_NRHF)) == EPP_NRHF) { 911 if ((stat & (EPP_NRAEF|EPP_NRHF)) == EPP_NRHF) {
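
The change above swaps an open-coded signed subtraction for time_before(), the wrap-safe way to compare jiffies values: the macro in <linux/jiffies.h> does the subtraction in a signed type, so the test keeps working when the jiffies counter overflows. A small sketch of the idiom, assuming a roughly 333 ms polling window like the autoprobe loop above:

#include <linux/jiffies.h>
#include <linux/errno.h>

static int foo_poll_until(int (*ready)(void))
{
	unsigned long timeout = jiffies + HZ / 3;	/* about a third of a second */

	while (time_before(jiffies, timeout)) {
		if (ready())
			return 0;
		/* a plain "jiffies < timeout" test would misbehave across a jiffies wrap */
	}
	return -ETIMEDOUT;
}
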
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 55c7ed608391..247c8ca86033 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -115,6 +115,7 @@
115#include <linux/delay.h> 115#include <linux/delay.h>
116#include <linux/init.h> 116#include <linux/init.h>
117#include <linux/bitops.h> 117#include <linux/bitops.h>
118#include <linux/jiffies.h>
118 119
119#include <asm/io.h> 120#include <asm/io.h>
120 121
@@ -1499,7 +1500,7 @@ static int hp100_start_xmit_bm(struct sk_buff *skb, struct net_device *dev)
1499 printk("hp100: %s: start_xmit_bm: No TX PDL available.\n", dev->name); 1500 printk("hp100: %s: start_xmit_bm: No TX PDL available.\n", dev->name);
1500#endif 1501#endif
1501 /* not waited long enough since last tx? */ 1502 /* not waited long enough since last tx? */
1502 if (jiffies - dev->trans_start < HZ) 1503 if (time_before(jiffies, dev->trans_start + HZ))
1503 return -EAGAIN; 1504 return -EAGAIN;
1504 1505
1505 if (hp100_check_lan(dev)) 1506 if (hp100_check_lan(dev))
@@ -1652,7 +1653,7 @@ static int hp100_start_xmit(struct sk_buff *skb, struct net_device *dev)
1652 printk("hp100: %s: start_xmit: tx free mem = 0x%x\n", dev->name, i); 1653 printk("hp100: %s: start_xmit: tx free mem = 0x%x\n", dev->name, i);
1653#endif 1654#endif
1654 /* not waited long enough since last failed tx try? */ 1655 /* not waited long enough since last failed tx try? */
1655 if (jiffies - dev->trans_start < HZ) { 1656 if (time_before(jiffies, dev->trans_start + HZ)) {
1656#ifdef HP100_DEBUG 1657#ifdef HP100_DEBUG
1657 printk("hp100: %s: trans_start timing problem\n", 1658 printk("hp100: %s: trans_start timing problem\n",
1658 dev->name); 1659 dev->name);
@@ -1718,17 +1719,10 @@ static int hp100_start_xmit(struct sk_buff *skb, struct net_device *dev)
1718 hp100_outw(i, FRAGMENT_LEN); /* and first/only fragment length */ 1719 hp100_outw(i, FRAGMENT_LEN); /* and first/only fragment length */
1719 1720
1720 if (lp->mode == 2) { /* memory mapped */ 1721 if (lp->mode == 2) { /* memory mapped */
1721 if (lp->mem_ptr_virt) { /* high pci memory was remapped */ 1722 /* Note: The J2585B needs alignment to 32bits here! */
1722 /* Note: The J2585B needs alignment to 32bits here! */ 1723 memcpy_toio(lp->mem_ptr_virt, skb->data, (skb->len + 3) & ~3);
1723 memcpy_toio(lp->mem_ptr_virt, skb->data, (skb->len + 3) & ~3); 1724 if (!ok_flag)
1724 if (!ok_flag) 1725 memset_io(lp->mem_ptr_virt, 0, HP100_MIN_PACKET_SIZE - skb->len);
1725 memset_io(lp->mem_ptr_virt, 0, HP100_MIN_PACKET_SIZE - skb->len);
1726 } else {
1727 /* Note: The J2585B needs alignment to 32bits here! */
1728 isa_memcpy_toio(lp->mem_ptr_phys, skb->data, (skb->len + 3) & ~3);
1729 if (!ok_flag)
1730 isa_memset_io(lp->mem_ptr_phys, 0, HP100_MIN_PACKET_SIZE - skb->len);
1731 }
1732 } else { /* programmed i/o */ 1726 } else { /* programmed i/o */
1733 outsl(ioaddr + HP100_REG_DATA32, skb->data, 1727 outsl(ioaddr + HP100_REG_DATA32, skb->data,
1734 (skb->len + 3) >> 2); 1728 (skb->len + 3) >> 2);
@@ -1798,10 +1792,7 @@ static void hp100_rx(struct net_device *dev)
1798 /* First we get the header, which contains information about the */ 1792 /* First we get the header, which contains information about the */
1799 /* actual length of the received packet. */ 1793 /* actual length of the received packet. */
1800 if (lp->mode == 2) { /* memory mapped mode */ 1794 if (lp->mode == 2) { /* memory mapped mode */
1801 if (lp->mem_ptr_virt) /* if memory was remapped */ 1795 header = readl(lp->mem_ptr_virt);
1802 header = readl(lp->mem_ptr_virt);
1803 else
1804 header = isa_readl(lp->mem_ptr_phys);
1805 } else /* programmed i/o */ 1796 } else /* programmed i/o */
1806 header = hp100_inl(DATA32); 1797 header = hp100_inl(DATA32);
1807 1798
@@ -1833,13 +1824,9 @@ static void hp100_rx(struct net_device *dev)
1833 ptr = skb->data; 1824 ptr = skb->data;
1834 1825
1835 /* Now transfer the data from the card into that area */ 1826 /* Now transfer the data from the card into that area */
1836 if (lp->mode == 2) { 1827 if (lp->mode == 2)
1837 if (lp->mem_ptr_virt) 1828 memcpy_fromio(ptr, lp->mem_ptr_virt,pkt_len);
1838 memcpy_fromio(ptr, lp->mem_ptr_virt,pkt_len); 1829 else /* io mapped */
1839 /* Note alignment to 32bit transfers */
1840 else
1841 isa_memcpy_fromio(ptr, lp->mem_ptr_phys, pkt_len);
1842 } else /* io mapped */
1843 insl(ioaddr + HP100_REG_DATA32, ptr, pkt_len >> 2); 1830 insl(ioaddr + HP100_REG_DATA32, ptr, pkt_len >> 2);
1844 1831
1845 skb->protocol = eth_type_trans(skb, dev); 1832 skb->protocol = eth_type_trans(skb, dev);
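
With the isa_memcpy_* fallbacks removed, hp100 always reaches the card through an ioremap()ed pointer and the __iomem copy helpers. A hedged sketch of that access pattern on its own, keeping the driver's 32-bit-aligned write requirement as a comment (names and sizes are illustrative):

#include <linux/errno.h>
#include <asm/io.h>

static int foo_copy_frame(unsigned long phys, void *buf, unsigned int len)
{
	void __iomem *win = ioremap(phys, len + 3);

	if (!win)
		return -ENOMEM;
	/* the J2585B wants writes padded up to a 32-bit boundary */
	memcpy_toio(win, buf, (len + 3) & ~3);
	memcpy_fromio(buf, win, len);		/* reads come back the same way */
	iounmap(win);
	return 0;
}
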
diff --git a/drivers/net/ibm_emac/ibm_emac_core.c b/drivers/net/ibm_emac/ibm_emac_core.c
index 591c5864ffb1..7e49522b8b3c 100644
--- a/drivers/net/ibm_emac/ibm_emac_core.c
+++ b/drivers/net/ibm_emac/ibm_emac_core.c
@@ -204,7 +204,7 @@ static inline int emac_phy_gpcs(int phy_mode)
204 204
205static inline void emac_tx_enable(struct ocp_enet_private *dev) 205static inline void emac_tx_enable(struct ocp_enet_private *dev)
206{ 206{
207 struct emac_regs *p = dev->emacp; 207 struct emac_regs __iomem *p = dev->emacp;
208 unsigned long flags; 208 unsigned long flags;
209 u32 r; 209 u32 r;
210 210
@@ -220,7 +220,7 @@ static inline void emac_tx_enable(struct ocp_enet_private *dev)
220 220
221static void emac_tx_disable(struct ocp_enet_private *dev) 221static void emac_tx_disable(struct ocp_enet_private *dev)
222{ 222{
223 struct emac_regs *p = dev->emacp; 223 struct emac_regs __iomem *p = dev->emacp;
224 unsigned long flags; 224 unsigned long flags;
225 u32 r; 225 u32 r;
226 226
@@ -244,7 +244,7 @@ static void emac_tx_disable(struct ocp_enet_private *dev)
244 244
245static void emac_rx_enable(struct ocp_enet_private *dev) 245static void emac_rx_enable(struct ocp_enet_private *dev)
246{ 246{
247 struct emac_regs *p = dev->emacp; 247 struct emac_regs __iomem *p = dev->emacp;
248 unsigned long flags; 248 unsigned long flags;
249 u32 r; 249 u32 r;
250 250
@@ -275,7 +275,7 @@ static void emac_rx_enable(struct ocp_enet_private *dev)
275 275
276static void emac_rx_disable(struct ocp_enet_private *dev) 276static void emac_rx_disable(struct ocp_enet_private *dev)
277{ 277{
278 struct emac_regs *p = dev->emacp; 278 struct emac_regs __iomem *p = dev->emacp;
279 unsigned long flags; 279 unsigned long flags;
280 u32 r; 280 u32 r;
281 281
@@ -299,7 +299,7 @@ static void emac_rx_disable(struct ocp_enet_private *dev)
299 299
300static inline void emac_rx_disable_async(struct ocp_enet_private *dev) 300static inline void emac_rx_disable_async(struct ocp_enet_private *dev)
301{ 301{
302 struct emac_regs *p = dev->emacp; 302 struct emac_regs __iomem *p = dev->emacp;
303 unsigned long flags; 303 unsigned long flags;
304 u32 r; 304 u32 r;
305 305
@@ -315,7 +315,7 @@ static inline void emac_rx_disable_async(struct ocp_enet_private *dev)
315 315
316static int emac_reset(struct ocp_enet_private *dev) 316static int emac_reset(struct ocp_enet_private *dev)
317{ 317{
318 struct emac_regs *p = dev->emacp; 318 struct emac_regs __iomem *p = dev->emacp;
319 unsigned long flags; 319 unsigned long flags;
320 int n = 20; 320 int n = 20;
321 321
@@ -348,7 +348,7 @@ static int emac_reset(struct ocp_enet_private *dev)
348 348
349static void emac_hash_mc(struct ocp_enet_private *dev) 349static void emac_hash_mc(struct ocp_enet_private *dev)
350{ 350{
351 struct emac_regs *p = dev->emacp; 351 struct emac_regs __iomem *p = dev->emacp;
352 u16 gaht[4] = { 0 }; 352 u16 gaht[4] = { 0 };
353 struct dev_mc_list *dmi; 353 struct dev_mc_list *dmi;
354 354
@@ -393,7 +393,7 @@ static inline int emac_opb_mhz(void)
393/* BHs disabled */ 393/* BHs disabled */
394static int emac_configure(struct ocp_enet_private *dev) 394static int emac_configure(struct ocp_enet_private *dev)
395{ 395{
396 struct emac_regs *p = dev->emacp; 396 struct emac_regs __iomem *p = dev->emacp;
397 struct net_device *ndev = dev->ndev; 397 struct net_device *ndev = dev->ndev;
398 int gige; 398 int gige;
399 u32 r; 399 u32 r;
@@ -555,7 +555,7 @@ static void emac_full_tx_reset(struct net_device *ndev)
555 555
556static int __emac_mdio_read(struct ocp_enet_private *dev, u8 id, u8 reg) 556static int __emac_mdio_read(struct ocp_enet_private *dev, u8 id, u8 reg)
557{ 557{
558 struct emac_regs *p = dev->emacp; 558 struct emac_regs __iomem *p = dev->emacp;
559 u32 r; 559 u32 r;
560 int n; 560 int n;
561 561
@@ -604,7 +604,7 @@ static int __emac_mdio_read(struct ocp_enet_private *dev, u8 id, u8 reg)
604static void __emac_mdio_write(struct ocp_enet_private *dev, u8 id, u8 reg, 604static void __emac_mdio_write(struct ocp_enet_private *dev, u8 id, u8 reg,
605 u16 val) 605 u16 val)
606{ 606{
607 struct emac_regs *p = dev->emacp; 607 struct emac_regs __iomem *p = dev->emacp;
608 int n; 608 int n;
609 609
610 DBG2("%d: mdio_write(%02x,%02x,%04x)" NL, dev->def->index, id, reg, 610 DBG2("%d: mdio_write(%02x,%02x,%04x)" NL, dev->def->index, id, reg,
@@ -666,7 +666,7 @@ static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
666static void emac_set_multicast_list(struct net_device *ndev) 666static void emac_set_multicast_list(struct net_device *ndev)
667{ 667{
668 struct ocp_enet_private *dev = ndev->priv; 668 struct ocp_enet_private *dev = ndev->priv;
669 struct emac_regs *p = dev->emacp; 669 struct emac_regs __iomem *p = dev->emacp;
670 u32 rmr = emac_iff2rmr(ndev); 670 u32 rmr = emac_iff2rmr(ndev);
671 671
672 DBG("%d: multicast %08x" NL, dev->def->index, rmr); 672 DBG("%d: multicast %08x" NL, dev->def->index, rmr);
@@ -825,7 +825,7 @@ static void emac_clean_rx_ring(struct ocp_enet_private *dev)
825} 825}
826 826
827static inline int emac_alloc_rx_skb(struct ocp_enet_private *dev, int slot, 827static inline int emac_alloc_rx_skb(struct ocp_enet_private *dev, int slot,
828 int flags) 828 gfp_t flags)
829{ 829{
830 struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags); 830 struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
831 if (unlikely(!skb)) 831 if (unlikely(!skb))
@@ -1047,7 +1047,7 @@ static inline u16 emac_tx_csum(struct ocp_enet_private *dev,
1047 1047
1048static inline int emac_xmit_finish(struct ocp_enet_private *dev, int len) 1048static inline int emac_xmit_finish(struct ocp_enet_private *dev, int len)
1049{ 1049{
1050 struct emac_regs *p = dev->emacp; 1050 struct emac_regs __iomem *p = dev->emacp;
1051 struct net_device *ndev = dev->ndev; 1051 struct net_device *ndev = dev->ndev;
1052 1052
1053 /* Send the packet out */ 1053 /* Send the packet out */
@@ -1519,7 +1519,7 @@ static void emac_rxde(void *param)
1519static irqreturn_t emac_irq(int irq, void *dev_instance, struct pt_regs *regs) 1519static irqreturn_t emac_irq(int irq, void *dev_instance, struct pt_regs *regs)
1520{ 1520{
1521 struct ocp_enet_private *dev = dev_instance; 1521 struct ocp_enet_private *dev = dev_instance;
1522 struct emac_regs *p = dev->emacp; 1522 struct emac_regs __iomem *p = dev->emacp;
1523 struct ibm_emac_error_stats *st = &dev->estats; 1523 struct ibm_emac_error_stats *st = &dev->estats;
1524 1524
1525 u32 isr = in_be32(&p->isr); 1525 u32 isr = in_be32(&p->isr);
@@ -1619,17 +1619,17 @@ static void emac_remove(struct ocp_device *ocpdev)
1619 1619
1620 DBG("%d: remove" NL, dev->def->index); 1620 DBG("%d: remove" NL, dev->def->index);
1621 1621
1622 ocp_set_drvdata(ocpdev, 0); 1622 ocp_set_drvdata(ocpdev, NULL);
1623 unregister_netdev(dev->ndev); 1623 unregister_netdev(dev->ndev);
1624 1624
1625 tah_fini(dev->tah_dev); 1625 tah_fini(dev->tah_dev);
1626 rgmii_fini(dev->rgmii_dev, dev->rgmii_input); 1626 rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
1627 zmii_fini(dev->zmii_dev, dev->zmii_input); 1627 zmii_fini(dev->zmii_dev, dev->zmii_input);
1628 1628
1629 emac_dbg_register(dev->def->index, 0); 1629 emac_dbg_register(dev->def->index, NULL);
1630 1630
1631 mal_unregister_commac(dev->mal, &dev->commac); 1631 mal_unregister_commac(dev->mal, &dev->commac);
1632 iounmap((void *)dev->emacp); 1632 iounmap(dev->emacp);
1633 kfree(dev->ndev); 1633 kfree(dev->ndev);
1634} 1634}
1635 1635
@@ -2048,9 +2048,7 @@ static int __init emac_probe(struct ocp_device *ocpdev)
2048 goto out4; 2048 goto out4;
2049 2049
2050 /* Map EMAC regs */ 2050 /* Map EMAC regs */
2051 dev->emacp = 2051 dev->emacp = ioremap(dev->def->paddr, sizeof(struct emac_regs));
2052 (struct emac_regs *)ioremap(dev->def->paddr,
2053 sizeof(struct emac_regs));
2054 if (!dev->emacp) { 2052 if (!dev->emacp) {
2055 printk(KERN_ERR "emac%d: could not ioremap device registers!\n", 2053 printk(KERN_ERR "emac%d: could not ioremap device registers!\n",
2056 dev->def->index); 2054 dev->def->index);
@@ -2210,7 +2208,7 @@ static int __init emac_probe(struct ocp_device *ocpdev)
2210 2208
2211 return 0; 2209 return 0;
2212 out6: 2210 out6:
2213 iounmap((void *)dev->emacp); 2211 iounmap(dev->emacp);
2214 out5: 2212 out5:
2215 tah_fini(dev->tah_dev); 2213 tah_fini(dev->tah_dev);
2216 out4: 2214 out4:
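
The long series of one-line changes above all do the same thing: the register-block pointer is declared __iomem so sparse can verify it only comes from ioremap() and is only dereferenced through MMIO accessors, which in turn lets the casts around ioremap()/iounmap() go away. A minimal sketch of the annotated pattern (the register layout is invented for the example; in_be32/out_be32 are the PowerPC accessors ibm_emac uses, readl/writel being the portable equivalents):

#include <linux/types.h>
#include <linux/errno.h>
#include <asm/io.h>

struct foo_regs {
	u32 ctrl;
	u32 status;
};

static int foo_map_and_poke(unsigned long paddr)
{
	struct foo_regs __iomem *p = ioremap(paddr, sizeof(struct foo_regs));

	if (!p)
		return -ENOMEM;
	out_be32(&p->ctrl, 1);		/* typed access, no casts */
	(void)in_be32(&p->status);
	iounmap(p);			/* iounmap() now takes the pointer as-is */
	return 0;
}
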
diff --git a/drivers/net/ibm_emac/ibm_emac_core.h b/drivers/net/ibm_emac/ibm_emac_core.h
index 911abbaf471b..f61273b2e94f 100644
--- a/drivers/net/ibm_emac/ibm_emac_core.h
+++ b/drivers/net/ibm_emac/ibm_emac_core.h
@@ -155,7 +155,7 @@ struct ibm_emac_error_stats {
155 155
156struct ocp_enet_private { 156struct ocp_enet_private {
157 struct net_device *ndev; /* 0 */ 157 struct net_device *ndev; /* 0 */
158 struct emac_regs *emacp; 158 struct emac_regs __iomem *emacp;
159 159
160 struct mal_descriptor *tx_desc; 160 struct mal_descriptor *tx_desc;
161 int tx_cnt; 161 int tx_cnt;
diff --git a/drivers/net/ibm_emac/ibm_emac_debug.c b/drivers/net/ibm_emac/ibm_emac_debug.c
index 75d3b8639041..c7e1ecfa08fe 100644
--- a/drivers/net/ibm_emac/ibm_emac_debug.c
+++ b/drivers/net/ibm_emac/ibm_emac_debug.c
@@ -58,7 +58,7 @@ static void emac_desc_dump(int idx, struct ocp_enet_private *p)
58 58
59static void emac_mac_dump(int idx, struct ocp_enet_private *dev) 59static void emac_mac_dump(int idx, struct ocp_enet_private *dev)
60{ 60{
61 struct emac_regs *p = dev->emacp; 61 struct emac_regs __iomem *p = dev->emacp;
62 62
63 printk("** EMAC%d registers **\n" 63 printk("** EMAC%d registers **\n"
64 "MR0 = 0x%08x MR1 = 0x%08x TMR0 = 0x%08x TMR1 = 0x%08x\n" 64 "MR0 = 0x%08x MR1 = 0x%08x TMR0 = 0x%08x TMR1 = 0x%08x\n"
diff --git a/drivers/net/ibm_emac/ibm_emac_rgmii.h b/drivers/net/ibm_emac/ibm_emac_rgmii.h
index a1ffb8a44fff..7f03d536c9a3 100644
--- a/drivers/net/ibm_emac/ibm_emac_rgmii.h
+++ b/drivers/net/ibm_emac/ibm_emac_rgmii.h
@@ -31,7 +31,7 @@ struct rgmii_regs {
31 31
32/* RGMII device */ 32/* RGMII device */
33struct ibm_ocp_rgmii { 33struct ibm_ocp_rgmii {
34 struct rgmii_regs *base; 34 struct rgmii_regs __iomem *base;
35 int users; /* number of EMACs using this RGMII bridge */ 35 int users; /* number of EMACs using this RGMII bridge */
36}; 36};
37 37
diff --git a/drivers/net/ibm_emac/ibm_emac_zmii.c b/drivers/net/ibm_emac/ibm_emac_zmii.c
index 35c1185079ed..e129e0aaa045 100644
--- a/drivers/net/ibm_emac/ibm_emac_zmii.c
+++ b/drivers/net/ibm_emac/ibm_emac_zmii.c
@@ -80,7 +80,7 @@ static inline u32 zmii_mode_mask(int mode, int input)
80static int __init zmii_init(struct ocp_device *ocpdev, int input, int *mode) 80static int __init zmii_init(struct ocp_device *ocpdev, int input, int *mode)
81{ 81{
82 struct ibm_ocp_zmii *dev = ocp_get_drvdata(ocpdev); 82 struct ibm_ocp_zmii *dev = ocp_get_drvdata(ocpdev);
83 struct zmii_regs *p; 83 struct zmii_regs __iomem *p;
84 84
85 ZMII_DBG("%d: init(%d, %d)" NL, ocpdev->def->index, input, *mode); 85 ZMII_DBG("%d: init(%d, %d)" NL, ocpdev->def->index, input, *mode);
86 86
@@ -94,8 +94,7 @@ static int __init zmii_init(struct ocp_device *ocpdev, int input, int *mode)
94 } 94 }
95 dev->mode = PHY_MODE_NA; 95 dev->mode = PHY_MODE_NA;
96 96
97 p = (struct zmii_regs *)ioremap(ocpdev->def->paddr, 97 p = ioremap(ocpdev->def->paddr, sizeof(struct zmii_regs));
98 sizeof(struct zmii_regs));
99 if (!p) { 98 if (!p) {
100 printk(KERN_ERR 99 printk(KERN_ERR
101 "zmii%d: could not ioremap device registers!\n", 100 "zmii%d: could not ioremap device registers!\n",
@@ -231,7 +230,7 @@ void __exit __zmii_fini(struct ocp_device *ocpdev, int input)
231 if (!--dev->users) { 230 if (!--dev->users) {
232 /* Free everything if this is the last user */ 231 /* Free everything if this is the last user */
233 ocp_set_drvdata(ocpdev, NULL); 232 ocp_set_drvdata(ocpdev, NULL);
234 iounmap((void *)dev->base); 233 iounmap(dev->base);
235 kfree(dev); 234 kfree(dev);
236 } 235 }
237} 236}
diff --git a/drivers/net/ibm_emac/ibm_emac_zmii.h b/drivers/net/ibm_emac/ibm_emac_zmii.h
index 0bb26062c0ad..92c854410753 100644
--- a/drivers/net/ibm_emac/ibm_emac_zmii.h
+++ b/drivers/net/ibm_emac/ibm_emac_zmii.h
@@ -32,7 +32,7 @@ struct zmii_regs {
32 32
33/* ZMII device */ 33/* ZMII device */
34struct ibm_ocp_zmii { 34struct ibm_ocp_zmii {
35 struct zmii_regs *base; 35 struct zmii_regs __iomem *base;
36 int mode; /* subset of PHY_MODE_XXXX */ 36 int mode; /* subset of PHY_MODE_XXXX */
37 int users; /* number of EMACs using this ZMII bridge */ 37 int users; /* number of EMACs using this ZMII bridge */
38 u32 fer_save; /* FER value left by firmware */ 38 u32 fer_save; /* FER value left by firmware */
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index 7a081346f079..c81fe1c382d5 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -283,7 +283,7 @@ config USB_IRDA
283 Say Y here if you want to build support for the USB IrDA FIR Dongle 283 Say Y here if you want to build support for the USB IrDA FIR Dongle
284 device driver. To compile it as a module, choose M here: the module 284 device driver. To compile it as a module, choose M here: the module
285 will be called irda-usb. IrDA-USB support the various IrDA USB 285 will be called irda-usb. IrDA-USB support the various IrDA USB
286 dongles available and most of their pecularities. Those dongles 286 dongles available and most of their peculiarities. Those dongles
287 plug in the USB port of your computer, are plug and play, and 287 plug in the USB port of your computer, are plug and play, and
288 support SIR and FIR (4Mbps) speeds. On the other hand, those 288 support SIR and FIR (4Mbps) speeds. On the other hand, those
289 dongles tend to be less efficient than a FIR chipset. 289 dongles tend to be less efficient than a FIR chipset.
@@ -360,7 +360,7 @@ config ALI_FIR
360 help 360 help
361 Say Y here if you want to build support for the ALi M5123 FIR 361 Say Y here if you want to build support for the ALi M5123 FIR
362 Controller. The ALi M5123 FIR Controller is embedded in ALi M1543C, 362 Controller. The ALi M5123 FIR Controller is embedded in ALi M1543C,
363 M1535, M1535D, M1535+, M1535D Sourth Bridge. This driver supports 363 M1535, M1535D, M1535+, M1535D South Bridge. This driver supports
364 SIR, MIR and FIR (4Mbps) speeds. 364 SIR, MIR and FIR (4Mbps) speeds.
365 365
366 To compile it as a module, choose M here: the module will be called 366 To compile it as a module, choose M here: the module will be called
diff --git a/drivers/net/macsonic.c b/drivers/net/macsonic.c
index 02d5c6822733..f6f3dafe83ee 100644
--- a/drivers/net/macsonic.c
+++ b/drivers/net/macsonic.c
@@ -622,7 +622,7 @@ static int __init mac_sonic_init_module(void)
622 return 0; 622 return 0;
623 623
624out_unregister: 624out_unregister:
625 driver_unregister(&mac_sonic_driver); 625 platform_driver_unregister(&mac_sonic_driver);
626 626
627 return -ENOMEM; 627 return -ENOMEM;
628} 628}
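
The macsonic fix is purely about symmetry: what platform_driver_register() sets up has to be torn down with platform_driver_unregister(), not the lower-level driver_unregister(). A stripped-down sketch of the matching pair (probe/remove callbacks and the actual device handling are omitted):

#include <linux/module.h>
#include <linux/platform_device.h>

static struct platform_driver foo_driver = {
	.driver = { .name = "foo" },
};

static int __init foo_init(void)
{
	int err = platform_driver_register(&foo_driver);

	if (err)
		return err;
	/* any later failure in init unwinds with the same unregister call */
	return 0;
}

static void __exit foo_exit(void)
{
	platform_driver_unregister(&foo_driver);
}

module_init(foo_init);
module_exit(foo_exit);
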
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index c0998ef938e0..9f2661355a4a 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -10,7 +10,7 @@
10 * 10 *
11 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org> 11 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
12 * 12 *
13 * Copyright (C) 2004-2005 MontaVista Software, Inc. 13 * Copyright (C) 2004-2006 MontaVista Software, Inc.
14 * Dale Farnsworth <dale@farnsworth.org> 14 * Dale Farnsworth <dale@farnsworth.org>
15 * 15 *
16 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com> 16 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
@@ -37,8 +37,6 @@
37#include <linux/tcp.h> 37#include <linux/tcp.h>
38#include <linux/udp.h> 38#include <linux/udp.h>
39#include <linux/etherdevice.h> 39#include <linux/etherdevice.h>
40#include <linux/in.h>
41#include <linux/ip.h>
42 40
43#include <linux/bitops.h> 41#include <linux/bitops.h>
44#include <linux/delay.h> 42#include <linux/delay.h>
@@ -52,39 +50,16 @@
52#include <asm/delay.h> 50#include <asm/delay.h>
53#include "mv643xx_eth.h" 51#include "mv643xx_eth.h"
54 52
55/*
56 * The first part is the high level driver of the gigE ethernet ports.
57 */
58
59/* Constants */
60#define VLAN_HLEN 4
61#define FCS_LEN 4
62#define DMA_ALIGN 8 /* hw requires 8-byte alignment */
63#define HW_IP_ALIGN 2 /* hw aligns IP header */
64#define WRAP HW_IP_ALIGN + ETH_HLEN + VLAN_HLEN + FCS_LEN
65#define RX_SKB_SIZE ((dev->mtu + WRAP + 7) & ~0x7)
66
67#define INT_UNMASK_ALL 0x0007ffff
68#define INT_UNMASK_ALL_EXT 0x0011ffff
69#define INT_MASK_ALL 0x00000000
70#define INT_MASK_ALL_EXT 0x00000000
71#define INT_CAUSE_CHECK_BITS INT_CAUSE_UNMASK_ALL
72#define INT_CAUSE_CHECK_BITS_EXT INT_CAUSE_UNMASK_ALL_EXT
73
74#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
75#define MAX_DESCS_PER_SKB (MAX_SKB_FRAGS + 1)
76#else
77#define MAX_DESCS_PER_SKB 1
78#endif
79
80#define PHY_WAIT_ITERATIONS 1000 /* 1000 iterations * 10uS = 10mS max */
81#define PHY_WAIT_MICRO_SECONDS 10
82
83/* Static function declarations */ 53/* Static function declarations */
84static int eth_port_link_is_up(unsigned int eth_port_num);
85static void eth_port_uc_addr_get(struct net_device *dev, 54static void eth_port_uc_addr_get(struct net_device *dev,
86 unsigned char *MacAddr); 55 unsigned char *MacAddr);
87static void eth_port_set_multicast_list(struct net_device *); 56static void eth_port_set_multicast_list(struct net_device *);
57static void mv643xx_eth_port_enable_tx(unsigned int port_num,
58 unsigned int queues);
59static void mv643xx_eth_port_enable_rx(unsigned int port_num,
60 unsigned int queues);
61static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num);
62static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num);
88static int mv643xx_eth_open(struct net_device *); 63static int mv643xx_eth_open(struct net_device *);
89static int mv643xx_eth_stop(struct net_device *); 64static int mv643xx_eth_stop(struct net_device *);
90static int mv643xx_eth_change_mtu(struct net_device *, int); 65static int mv643xx_eth_change_mtu(struct net_device *, int);
@@ -93,8 +68,12 @@ static void eth_port_init_mac_tables(unsigned int eth_port_num);
93#ifdef MV643XX_NAPI 68#ifdef MV643XX_NAPI
94static int mv643xx_poll(struct net_device *dev, int *budget); 69static int mv643xx_poll(struct net_device *dev, int *budget);
95#endif 70#endif
71static int ethernet_phy_get(unsigned int eth_port_num);
96static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr); 72static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr);
97static int ethernet_phy_detect(unsigned int eth_port_num); 73static int ethernet_phy_detect(unsigned int eth_port_num);
74static int mv643xx_mdio_read(struct net_device *dev, int phy_id, int location);
75static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location, int val);
76static int mv643xx_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
98static struct ethtool_ops mv643xx_ethtool_ops; 77static struct ethtool_ops mv643xx_ethtool_ops;
99 78
100static char mv643xx_driver_name[] = "mv643xx_eth"; 79static char mv643xx_driver_name[] = "mv643xx_eth";
@@ -153,67 +132,53 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
153} 132}
154 133
155/* 134/*
156 * mv643xx_eth_rx_task 135 * mv643xx_eth_rx_refill_descs
157 * 136 *
158 * Fills / refills RX queue on a certain gigabit ethernet port 137 * Fills / refills RX queue on a certain gigabit ethernet port
159 * 138 *
160 * Input : pointer to ethernet interface network device structure 139 * Input : pointer to ethernet interface network device structure
161 * Output : N/A 140 * Output : N/A
162 */ 141 */
163static void mv643xx_eth_rx_task(void *data) 142static void mv643xx_eth_rx_refill_descs(struct net_device *dev)
164{ 143{
165 struct net_device *dev = (struct net_device *)data;
166 struct mv643xx_private *mp = netdev_priv(dev); 144 struct mv643xx_private *mp = netdev_priv(dev);
167 struct pkt_info pkt_info; 145 struct pkt_info pkt_info;
168 struct sk_buff *skb; 146 struct sk_buff *skb;
169 int unaligned; 147 int unaligned;
170 148
171 if (test_and_set_bit(0, &mp->rx_task_busy)) 149 while (mp->rx_desc_count < mp->rx_ring_size) {
172 panic("%s: Error in test_set_bit / clear_bit", dev->name); 150 skb = dev_alloc_skb(ETH_RX_SKB_SIZE + ETH_DMA_ALIGN);
173
174 while (mp->rx_ring_skbs < (mp->rx_ring_size - 5)) {
175 skb = dev_alloc_skb(RX_SKB_SIZE + DMA_ALIGN);
176 if (!skb) 151 if (!skb)
177 break; 152 break;
178 mp->rx_ring_skbs++; 153 mp->rx_desc_count++;
179 unaligned = (u32)skb->data & (DMA_ALIGN - 1); 154 unaligned = (u32)skb->data & (ETH_DMA_ALIGN - 1);
180 if (unaligned) 155 if (unaligned)
181 skb_reserve(skb, DMA_ALIGN - unaligned); 156 skb_reserve(skb, ETH_DMA_ALIGN - unaligned);
182 pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT; 157 pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT;
183 pkt_info.byte_cnt = RX_SKB_SIZE; 158 pkt_info.byte_cnt = ETH_RX_SKB_SIZE;
184 pkt_info.buf_ptr = dma_map_single(NULL, skb->data, RX_SKB_SIZE, 159 pkt_info.buf_ptr = dma_map_single(NULL, skb->data,
185 DMA_FROM_DEVICE); 160 ETH_RX_SKB_SIZE, DMA_FROM_DEVICE);
186 pkt_info.return_info = skb; 161 pkt_info.return_info = skb;
187 if (eth_rx_return_buff(mp, &pkt_info) != ETH_OK) { 162 if (eth_rx_return_buff(mp, &pkt_info) != ETH_OK) {
188 printk(KERN_ERR 163 printk(KERN_ERR
189 "%s: Error allocating RX Ring\n", dev->name); 164 "%s: Error allocating RX Ring\n", dev->name);
190 break; 165 break;
191 } 166 }
192 skb_reserve(skb, HW_IP_ALIGN); 167 skb_reserve(skb, ETH_HW_IP_ALIGN);
193 } 168 }
194 clear_bit(0, &mp->rx_task_busy);
195 /* 169 /*
196 * If RX ring is empty of SKB, set a timer to try allocating 170 * If RX ring is empty of SKB, set a timer to try allocating
197 * again in a later time . 171 * again at a later time.
198 */ 172 */
199 if ((mp->rx_ring_skbs == 0) && (mp->rx_timer_flag == 0)) { 173 if (mp->rx_desc_count == 0) {
200 printk(KERN_INFO "%s: Rx ring is empty\n", dev->name); 174 printk(KERN_INFO "%s: Rx ring is empty\n", dev->name);
201 /* After 100mSec */ 175 mp->timeout.expires = jiffies + (HZ / 10); /* 100 mSec */
202 mp->timeout.expires = jiffies + (HZ / 10);
203 add_timer(&mp->timeout); 176 add_timer(&mp->timeout);
204 mp->rx_timer_flag = 1;
205 }
206#ifdef MV643XX_RX_QUEUE_FILL_ON_TASK
207 else {
208 /* Return interrupts */
209 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(mp->port_num),
210 INT_UNMASK_ALL);
211 } 177 }
212#endif
213} 178}
214 179
215/* 180/*
216 * mv643xx_eth_rx_task_timer_wrapper 181 * mv643xx_eth_rx_refill_descs_timer_wrapper
217 * 182 *
218 * Timer routine to wake up RX queue filling task. This function is 183 * Timer routine to wake up RX queue filling task. This function is
219 * used only in case the RX queue is empty, and all alloc_skb has 184 * used only in case the RX queue is empty, and all alloc_skb has
@@ -222,13 +187,9 @@ static void mv643xx_eth_rx_task(void *data)
222 * Input : pointer to ethernet interface network device structure 187 * Input : pointer to ethernet interface network device structure
223 * Output : N/A 188 * Output : N/A
224 */ 189 */
225static void mv643xx_eth_rx_task_timer_wrapper(unsigned long data) 190static inline void mv643xx_eth_rx_refill_descs_timer_wrapper(unsigned long data)
226{ 191{
227 struct net_device *dev = (struct net_device *)data; 192 mv643xx_eth_rx_refill_descs((struct net_device *)data);
228 struct mv643xx_private *mp = netdev_priv(dev);
229
230 mp->rx_timer_flag = 0;
231 mv643xx_eth_rx_task((void *)data);
232} 193}
233 194
234/* 195/*
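
The refill loop above over-allocates each receive skb by ETH_DMA_ALIGN and then uses skb_reserve() to push skb->data onto an 8-byte boundary before handing the buffer to the DMA engine (a second two-byte reserve later aligns the IP header). A reduced sketch of just that alignment step, mirroring the 2.6-era convention of passing a NULL device to dma_map_single() (descriptor bookkeeping omitted, names illustrative):

#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

#define FOO_DMA_ALIGN	8	/* controller requires 8-byte aligned buffers */

static struct sk_buff *foo_alloc_rx_skb(unsigned int size, dma_addr_t *mapping)
{
	struct sk_buff *skb = dev_alloc_skb(size + FOO_DMA_ALIGN);
	unsigned int misalign;

	if (!skb)
		return NULL;
	misalign = (unsigned long)skb->data & (FOO_DMA_ALIGN - 1);
	if (misalign)
		skb_reserve(skb, FOO_DMA_ALIGN - misalign);

	*mapping = dma_map_single(NULL, skb->data, size, DMA_FROM_DEVICE);
	return skb;	/* *mapping goes into the rx descriptor */
}
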
@@ -245,8 +206,7 @@ static void mv643xx_eth_update_mac_address(struct net_device *dev)
245 unsigned int port_num = mp->port_num; 206 unsigned int port_num = mp->port_num;
246 207
247 eth_port_init_mac_tables(port_num); 208 eth_port_init_mac_tables(port_num);
248 memcpy(mp->port_mac_addr, dev->dev_addr, 6); 209 eth_port_uc_addr_set(port_num, dev->dev_addr);
249 eth_port_uc_addr_set(port_num, mp->port_mac_addr);
250} 210}
251 211
252/* 212/*
@@ -260,13 +220,14 @@ static void mv643xx_eth_update_mac_address(struct net_device *dev)
260static void mv643xx_eth_set_rx_mode(struct net_device *dev) 220static void mv643xx_eth_set_rx_mode(struct net_device *dev)
261{ 221{
262 struct mv643xx_private *mp = netdev_priv(dev); 222 struct mv643xx_private *mp = netdev_priv(dev);
223 u32 config_reg;
263 224
225 config_reg = mv_read(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num));
264 if (dev->flags & IFF_PROMISC) 226 if (dev->flags & IFF_PROMISC)
265 mp->port_config |= (u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE; 227 config_reg |= (u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE;
266 else 228 else
267 mp->port_config &= ~(u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE; 229 config_reg &= ~(u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE;
268 230 mv_write(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num), config_reg);
269 mv_write(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num), mp->port_config);
270 231
271 eth_port_set_multicast_list(dev); 232 eth_port_set_multicast_list(dev);
272} 233}
@@ -322,53 +283,82 @@ static void mv643xx_eth_tx_timeout_task(struct net_device *dev)
322 283
323 netif_device_detach(dev); 284 netif_device_detach(dev);
324 eth_port_reset(mp->port_num); 285 eth_port_reset(mp->port_num);
325 eth_port_start(mp); 286 eth_port_start(dev);
326 netif_device_attach(dev); 287 netif_device_attach(dev);
327} 288}
328 289
329/* 290/**
330 * mv643xx_eth_free_tx_queue 291 * mv643xx_eth_free_tx_descs - Free the tx desc data for completed descriptors
331 *
332 * Input : dev - a pointer to the required interface
333 * 292 *
334 * Output : 0 if was able to release skb , nonzero otherwise 293 * If force is non-zero, frees uncompleted descriptors as well
335 */ 294 */
336static int mv643xx_eth_free_tx_queue(struct net_device *dev, 295int mv643xx_eth_free_tx_descs(struct net_device *dev, int force)
337 unsigned int eth_int_cause_ext)
338{ 296{
339 struct mv643xx_private *mp = netdev_priv(dev); 297 struct mv643xx_private *mp = netdev_priv(dev);
340 struct net_device_stats *stats = &mp->stats; 298 struct eth_tx_desc *desc;
341 struct pkt_info pkt_info; 299 u32 cmd_sts;
342 int released = 1; 300 struct sk_buff *skb;
301 unsigned long flags;
302 int tx_index;
303 dma_addr_t addr;
304 int count;
305 int released = 0;
306
307 while (mp->tx_desc_count > 0) {
308 spin_lock_irqsave(&mp->lock, flags);
309 tx_index = mp->tx_used_desc_q;
310 desc = &mp->p_tx_desc_area[tx_index];
311 cmd_sts = desc->cmd_sts;
312
313 if (!force && (cmd_sts & ETH_BUFFER_OWNED_BY_DMA)) {
314 spin_unlock_irqrestore(&mp->lock, flags);
315 return released;
316 }
343 317
344 if (!(eth_int_cause_ext & (BIT0 | BIT8))) 318 mp->tx_used_desc_q = (tx_index + 1) % mp->tx_ring_size;
345 return released; 319 mp->tx_desc_count--;
346 320
347 /* Check only queue 0 */ 321 addr = desc->buf_ptr;
348 while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) { 322 count = desc->byte_cnt;
349 if (pkt_info.cmd_sts & BIT0) { 323 skb = mp->tx_skb[tx_index];
324 if (skb)
325 mp->tx_skb[tx_index] = NULL;
326
327 spin_unlock_irqrestore(&mp->lock, flags);
328
329 if (cmd_sts & ETH_ERROR_SUMMARY) {
350 printk("%s: Error in TX\n", dev->name); 330 printk("%s: Error in TX\n", dev->name);
351 stats->tx_errors++; 331 mp->stats.tx_errors++;
352 } 332 }
353 333
354 if (pkt_info.cmd_sts & ETH_TX_FIRST_DESC) 334 if (cmd_sts & ETH_TX_FIRST_DESC)
355 dma_unmap_single(NULL, pkt_info.buf_ptr, 335 dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
356 pkt_info.byte_cnt,
357 DMA_TO_DEVICE);
358 else 336 else
359 dma_unmap_page(NULL, pkt_info.buf_ptr, 337 dma_unmap_page(NULL, addr, count, DMA_TO_DEVICE);
360 pkt_info.byte_cnt,
361 DMA_TO_DEVICE);
362 338
363 if (pkt_info.return_info) { 339 if (skb)
364 dev_kfree_skb_irq(pkt_info.return_info); 340 dev_kfree_skb_irq(skb);
365 released = 0; 341
366 } 342 released = 1;
367 } 343 }
368 344
369 return released; 345 return released;
370} 346}
371 347
348static void mv643xx_eth_free_completed_tx_descs(struct net_device *dev)
349{
350 struct mv643xx_private *mp = netdev_priv(dev);
351
352 if (mv643xx_eth_free_tx_descs(dev, 0) &&
353 mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB)
354 netif_wake_queue(dev);
355}
356
357static void mv643xx_eth_free_all_tx_descs(struct net_device *dev)
358{
359 mv643xx_eth_free_tx_descs(dev, 1);
360}
361
372/* 362/*
373 * mv643xx_eth_receive 363 * mv643xx_eth_receive
374 * 364 *
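
mv643xx_eth_free_tx_descs(), introduced just above, is a classic consumer-side ring walk: take the lock, look at the oldest outstanding descriptor, bail out if the DMA engine still owns it (unless force is set), otherwise advance the used index, then unmap and free the skb with the lock dropped. A compressed sketch of that control flow, with the driver-specific fields boiled down to an illustrative ring structure:

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/dma-mapping.h>

#define FOO_OWNED_BY_DMA	0x80000000	/* stand-in for ETH_BUFFER_OWNED_BY_DMA */

struct foo_tx_desc { u32 cmd_sts; dma_addr_t buf; u16 len; };

struct foo_tx_ring {
	spinlock_t lock;
	int size, used, in_flight;
	struct foo_tx_desc *descs;
	struct sk_buff **skb;
};

static int foo_reclaim_tx(struct foo_tx_ring *ring, int force)
{
	int freed = 0;

	while (ring->in_flight > 0) {
		struct foo_tx_desc *desc;
		struct sk_buff *skb;
		unsigned long flags;
		int idx;

		spin_lock_irqsave(&ring->lock, flags);
		idx = ring->used;
		desc = &ring->descs[idx];
		if (!force && (desc->cmd_sts & FOO_OWNED_BY_DMA)) {
			spin_unlock_irqrestore(&ring->lock, flags);
			break;			/* hardware hasn't finished this one */
		}
		ring->used = (idx + 1) % ring->size;
		ring->in_flight--;
		skb = ring->skb[idx];
		ring->skb[idx] = NULL;
		spin_unlock_irqrestore(&ring->lock, flags);

		dma_unmap_single(NULL, desc->buf, desc->len, DMA_TO_DEVICE);
		if (skb)
			dev_kfree_skb_irq(skb);	/* safe in interrupt context */
		freed = 1;
	}
	return freed;
}
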
@@ -380,11 +370,7 @@ static int mv643xx_eth_free_tx_queue(struct net_device *dev,
380 * 370 *
381 * Output : number of served packets 371 * Output : number of served packets
382 */ 372 */
383#ifdef MV643XX_NAPI
384static int mv643xx_eth_receive_queue(struct net_device *dev, int budget) 373static int mv643xx_eth_receive_queue(struct net_device *dev, int budget)
385#else
386static int mv643xx_eth_receive_queue(struct net_device *dev)
387#endif
388{ 374{
389 struct mv643xx_private *mp = netdev_priv(dev); 375 struct mv643xx_private *mp = netdev_priv(dev);
390 struct net_device_stats *stats = &mp->stats; 376 struct net_device_stats *stats = &mp->stats;
@@ -392,15 +378,14 @@ static int mv643xx_eth_receive_queue(struct net_device *dev)
392 struct sk_buff *skb; 378 struct sk_buff *skb;
393 struct pkt_info pkt_info; 379 struct pkt_info pkt_info;
394 380
395#ifdef MV643XX_NAPI
396 while (budget-- > 0 && eth_port_receive(mp, &pkt_info) == ETH_OK) { 381 while (budget-- > 0 && eth_port_receive(mp, &pkt_info) == ETH_OK) {
397#else 382 mp->rx_desc_count--;
398 while (eth_port_receive(mp, &pkt_info) == ETH_OK) {
399#endif
400 mp->rx_ring_skbs--;
401 received_packets++; 383 received_packets++;
402 384
403 /* Update statistics. Note byte count includes 4 byte CRC count */ 385 /*
386 * Update statistics.
387 * Note byte count includes 4 byte CRC count
388 */
404 stats->rx_packets++; 389 stats->rx_packets++;
405 stats->rx_bytes += pkt_info.byte_cnt; 390 stats->rx_bytes += pkt_info.byte_cnt;
406 skb = pkt_info.return_info; 391 skb = pkt_info.return_info;
@@ -448,10 +433,61 @@ static int mv643xx_eth_receive_queue(struct net_device *dev)
448 } 433 }
449 dev->last_rx = jiffies; 434 dev->last_rx = jiffies;
450 } 435 }
436 mv643xx_eth_rx_refill_descs(dev); /* Fill RX ring with skb's */
451 437
452 return received_packets; 438 return received_packets;
453} 439}
454 440
441/* Set the mv643xx port configuration register for the speed/duplex mode. */
442static void mv643xx_eth_update_pscr(struct net_device *dev,
443 struct ethtool_cmd *ecmd)
444{
445 struct mv643xx_private *mp = netdev_priv(dev);
446 int port_num = mp->port_num;
447 u32 o_pscr, n_pscr;
448 unsigned int queues;
449
450 o_pscr = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num));
451 n_pscr = o_pscr;
452
453 /* clear speed, duplex and rx buffer size fields */
454 n_pscr &= ~(MV643XX_ETH_SET_MII_SPEED_TO_100 |
455 MV643XX_ETH_SET_GMII_SPEED_TO_1000 |
456 MV643XX_ETH_SET_FULL_DUPLEX_MODE |
457 MV643XX_ETH_MAX_RX_PACKET_MASK);
458
459 if (ecmd->duplex == DUPLEX_FULL)
460 n_pscr |= MV643XX_ETH_SET_FULL_DUPLEX_MODE;
461
462 if (ecmd->speed == SPEED_1000)
463 n_pscr |= MV643XX_ETH_SET_GMII_SPEED_TO_1000 |
464 MV643XX_ETH_MAX_RX_PACKET_9700BYTE;
465 else {
466 if (ecmd->speed == SPEED_100)
467 n_pscr |= MV643XX_ETH_SET_MII_SPEED_TO_100;
468 n_pscr |= MV643XX_ETH_MAX_RX_PACKET_1522BYTE;
469 }
470
471 if (n_pscr != o_pscr) {
472 if ((o_pscr & MV643XX_ETH_SERIAL_PORT_ENABLE) == 0)
473 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num),
474 n_pscr);
475 else {
476 queues = mv643xx_eth_port_disable_tx(port_num);
477
478 o_pscr &= ~MV643XX_ETH_SERIAL_PORT_ENABLE;
479 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num),
480 o_pscr);
481 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num),
482 n_pscr);
483 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num),
484 n_pscr);
485 if (queues)
486 mv643xx_eth_port_enable_tx(port_num, queues);
487 }
488 }
489}
490
455/* 491/*
456 * mv643xx_eth_int_handler 492 * mv643xx_eth_int_handler
457 * 493 *
@@ -473,78 +509,52 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
473 509
474 /* Read interrupt cause registers */ 510 /* Read interrupt cause registers */
475 eth_int_cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num)) & 511 eth_int_cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num)) &
476 INT_UNMASK_ALL; 512 ETH_INT_UNMASK_ALL;
477 513 if (eth_int_cause & ETH_INT_CAUSE_EXT) {
478 if (eth_int_cause & BIT1)
479 eth_int_cause_ext = mv_read( 514 eth_int_cause_ext = mv_read(
480 MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) & 515 MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
481 INT_UNMASK_ALL_EXT; 516 ETH_INT_UNMASK_ALL_EXT;
517 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num),
518 ~eth_int_cause_ext);
519 }
482 520
483#ifdef MV643XX_NAPI 521 /* PHY status changed */
484 if (!(eth_int_cause & 0x0007fffd)) { 522 if (eth_int_cause_ext & ETH_INT_CAUSE_PHY) {
485 /* Dont ack the Rx interrupt */ 523 struct ethtool_cmd cmd;
486#endif 524
487 /* 525 if (mii_link_ok(&mp->mii)) {
488 * Clear specific ethernet port intrerrupt registers by 526 mii_ethtool_gset(&mp->mii, &cmd);
489 * acknowleding relevant bits. 527 mv643xx_eth_update_pscr(dev, &cmd);
490 */ 528 mv643xx_eth_port_enable_tx(port_num,
491 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 529 ETH_TX_QUEUES_ENABLED);
492 ~eth_int_cause); 530 if (!netif_carrier_ok(dev)) {
493 if (eth_int_cause_ext != 0x0) 531 netif_carrier_on(dev);
494 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG 532 if (mp->tx_ring_size - mp->tx_desc_count >=
495 (port_num), ~eth_int_cause_ext); 533 MAX_DESCS_PER_SKB)
496 534 netif_wake_queue(dev);
497 /* UDP change : We may need this */ 535 }
498 if ((eth_int_cause_ext & 0x0000ffff) && 536 } else if (netif_carrier_ok(dev)) {
499 (mv643xx_eth_free_tx_queue(dev, eth_int_cause_ext) == 0) && 537 netif_stop_queue(dev);
500 (mp->tx_ring_size > mp->tx_ring_skbs + MAX_DESCS_PER_SKB)) 538 netif_carrier_off(dev);
501 netif_wake_queue(dev);
502#ifdef MV643XX_NAPI
503 } else {
504 if (netif_rx_schedule_prep(dev)) {
505 /* Mask all the interrupts */
506 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
507 INT_MASK_ALL);
508 /* wait for previous write to complete */
509 mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
510 __netif_rx_schedule(dev);
511 } 539 }
512#else 540 }
513 if (eth_int_cause & (BIT2 | BIT11))
514 mv643xx_eth_receive_queue(dev, 0);
515 541
516 /* 542#ifdef MV643XX_NAPI
517 * After forwarded received packets to upper layer, add a task 543 if (eth_int_cause & ETH_INT_CAUSE_RX) {
518 * in an interrupts enabled context that refills the RX ring 544 /* schedule the NAPI poll routine to maintain port */
519 * with skb's.
520 */
521#ifdef MV643XX_RX_QUEUE_FILL_ON_TASK
522 /* Mask all interrupts on ethernet port */
523 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 545 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
524 INT_MASK_ALL); 546 ETH_INT_MASK_ALL);
525 /* wait for previous write to take effect */ 547 /* wait for previous write to complete */
526 mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); 548 mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
527 549
528 queue_task(&mp->rx_task, &tq_immediate); 550 netif_rx_schedule(dev);
529 mark_bh(IMMEDIATE_BH); 551 }
530#else 552#else
531 mp->rx_task.func(dev); 553 if (eth_int_cause & ETH_INT_CAUSE_RX)
554 mv643xx_eth_receive_queue(dev, INT_MAX);
555 if (eth_int_cause_ext & ETH_INT_CAUSE_TX)
556 mv643xx_eth_free_completed_tx_descs(dev);
532#endif 557#endif
533#endif
534 }
535 /* PHY status changed */
536 if (eth_int_cause_ext & (BIT16 | BIT20)) {
537 if (eth_port_link_is_up(port_num)) {
538 netif_carrier_on(dev);
539 netif_wake_queue(dev);
540 /* Start TX queue */
541 mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG
542 (port_num), 1);
543 } else {
544 netif_carrier_off(dev);
545 netif_stop_queue(dev);
546 }
547 }
548 558
549 /* 559 /*
550 * If no real interrupt occurred, exit. 560 * If no real interrupt occurred, exit.
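
The rewritten handler above replaces the chip-specific eth_port_link_is_up() test with the generic MII helpers: on a PHY interrupt it asks mii_link_ok() whether the link is up, re-reads the negotiated speed/duplex with mii_ethtool_gset(), reprograms the port and toggles the carrier and queue state. A reduced sketch of that sequence, assuming a struct mii_if_info already wired up to the port's mdio_read/mdio_write (as mv643xx_eth does elsewhere in this patch):

#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

static void foo_handle_link_event(struct net_device *dev, struct mii_if_info *mii)
{
	if (mii_link_ok(mii)) {
		struct ethtool_cmd cmd;

		mii_ethtool_gset(mii, &cmd);	/* negotiated speed/duplex from the PHY */
		/* reprogram the MAC for cmd.speed / cmd.duplex here */
		if (!netif_carrier_ok(dev)) {
			netif_carrier_on(dev);
			netif_wake_queue(dev);
		}
	} else if (netif_carrier_ok(dev)) {
		netif_stop_queue(dev);
		netif_carrier_off(dev);
	}
}
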
@@ -670,9 +680,6 @@ static void ether_init_rx_desc_ring(struct mv643xx_private *mp)
670 mp->rx_used_desc_q = 0; 680 mp->rx_used_desc_q = 0;
671 681
672 mp->rx_desc_area_size = rx_desc_num * sizeof(struct eth_rx_desc); 682 mp->rx_desc_area_size = rx_desc_num * sizeof(struct eth_rx_desc);
673
674 /* Add the queue to the list of RX queues of this port */
675 mp->port_rx_queue_command |= 1;
676} 683}
677 684
678/* 685/*
@@ -712,14 +719,36 @@ static void ether_init_tx_desc_ring(struct mv643xx_private *mp)
712 719
713 mp->tx_curr_desc_q = 0; 720 mp->tx_curr_desc_q = 0;
714 mp->tx_used_desc_q = 0; 721 mp->tx_used_desc_q = 0;
715#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
716 mp->tx_first_desc_q = 0;
717#endif
718 722
719 mp->tx_desc_area_size = tx_desc_num * sizeof(struct eth_tx_desc); 723 mp->tx_desc_area_size = tx_desc_num * sizeof(struct eth_tx_desc);
724}
720 725
721 /* Add the queue to the list of Tx queues of this port */ 726static int mv643xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
722 mp->port_tx_queue_command |= 1; 727{
728 struct mv643xx_private *mp = netdev_priv(dev);
729 int err;
730
731 spin_lock_irq(&mp->lock);
732 err = mii_ethtool_sset(&mp->mii, cmd);
733 spin_unlock_irq(&mp->lock);
734
735 return err;
736}
737
738static int mv643xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
739{
740 struct mv643xx_private *mp = netdev_priv(dev);
741 int err;
742
743 spin_lock_irq(&mp->lock);
744 err = mii_ethtool_gset(&mp->mii, cmd);
745 spin_unlock_irq(&mp->lock);
746
747 /* The PHY may support 1000baseT_Half, but the mv643xx does not */
748 cmd->supported &= ~SUPPORTED_1000baseT_Half;
749 cmd->advertising &= ~ADVERTISED_1000baseT_Half;
750
751 return err;
723} 752}
724 753
725/* 754/*
@@ -750,23 +779,12 @@ static int mv643xx_eth_open(struct net_device *dev)
750 return -EAGAIN; 779 return -EAGAIN;
751 } 780 }
752 781
753 /* Stop RX Queues */
754 mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), 0x0000ff00);
755
756 /* Set the MAC Address */
757 memcpy(mp->port_mac_addr, dev->dev_addr, 6);
758
759 eth_port_init(mp); 782 eth_port_init(mp);
760 783
761 INIT_WORK(&mp->rx_task, (void (*)(void *))mv643xx_eth_rx_task, dev);
762
763 memset(&mp->timeout, 0, sizeof(struct timer_list)); 784 memset(&mp->timeout, 0, sizeof(struct timer_list));
764 mp->timeout.function = mv643xx_eth_rx_task_timer_wrapper; 785 mp->timeout.function = mv643xx_eth_rx_refill_descs_timer_wrapper;
765 mp->timeout.data = (unsigned long)dev; 786 mp->timeout.data = (unsigned long)dev;
766 787
767 mp->rx_task_busy = 0;
768 mp->rx_timer_flag = 0;
769
770 /* Allocate RX and TX skb rings */ 788 /* Allocate RX and TX skb rings */
771 mp->rx_skb = kmalloc(sizeof(*mp->rx_skb) * mp->rx_ring_size, 789 mp->rx_skb = kmalloc(sizeof(*mp->rx_skb) * mp->rx_ring_size,
772 GFP_KERNEL); 790 GFP_KERNEL);
@@ -784,7 +802,7 @@ static int mv643xx_eth_open(struct net_device *dev)
784 } 802 }
785 803
786 /* Allocate TX ring */ 804 /* Allocate TX ring */
787 mp->tx_ring_skbs = 0; 805 mp->tx_desc_count = 0;
788 size = mp->tx_ring_size * sizeof(struct eth_tx_desc); 806 size = mp->tx_ring_size * sizeof(struct eth_tx_desc);
789 mp->tx_desc_area_size = size; 807 mp->tx_desc_area_size = size;
790 808
@@ -809,7 +827,7 @@ static int mv643xx_eth_open(struct net_device *dev)
809 ether_init_tx_desc_ring(mp); 827 ether_init_tx_desc_ring(mp);
810 828
811 /* Allocate RX ring */ 829 /* Allocate RX ring */
812 mp->rx_ring_skbs = 0; 830 mp->rx_desc_count = 0;
813 size = mp->rx_ring_size * sizeof(struct eth_rx_desc); 831 size = mp->rx_ring_size * sizeof(struct eth_rx_desc);
814 mp->rx_desc_area_size = size; 832 mp->rx_desc_area_size = size;
815 833
@@ -839,9 +857,13 @@ static int mv643xx_eth_open(struct net_device *dev)
839 857
840 ether_init_rx_desc_ring(mp); 858 ether_init_rx_desc_ring(mp);
841 859
842 mv643xx_eth_rx_task(dev); /* Fill RX ring with skb's */ 860 mv643xx_eth_rx_refill_descs(dev); /* Fill RX ring with skb's */
861
862 /* Clear any pending ethernet port interrupts */
863 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
864 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
843 865
844 eth_port_start(mp); 866 eth_port_start(dev);
845 867
846 /* Interrupt Coalescing */ 868 /* Interrupt Coalescing */
847 869
@@ -853,16 +875,13 @@ static int mv643xx_eth_open(struct net_device *dev)
853 mp->tx_int_coal = 875 mp->tx_int_coal =
854 eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL); 876 eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL);
855 877
856 /* Clear any pending ethernet port interrupts */
857 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
858 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
859
860 /* Unmask phy and link status changes interrupts */ 878 /* Unmask phy and link status changes interrupts */
861 mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), 879 mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
862 INT_UNMASK_ALL_EXT); 880 ETH_INT_UNMASK_ALL_EXT);
863 881
864 /* Unmask RX buffer and TX end interrupt */ 882 /* Unmask RX buffer and TX end interrupt */
865 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_UNMASK_ALL); 883 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
884
866 return 0; 885 return 0;
867 886
868out_free_tx_skb: 887out_free_tx_skb:
@@ -878,25 +897,14 @@ out_free_irq:
878static void mv643xx_eth_free_tx_rings(struct net_device *dev) 897static void mv643xx_eth_free_tx_rings(struct net_device *dev)
879{ 898{
880 struct mv643xx_private *mp = netdev_priv(dev); 899 struct mv643xx_private *mp = netdev_priv(dev);
881 unsigned int port_num = mp->port_num;
882 unsigned int curr;
883 struct sk_buff *skb;
884 900
885 /* Stop Tx Queues */ 901 /* Stop Tx Queues */
886 mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), 0x0000ff00); 902 mv643xx_eth_port_disable_tx(mp->port_num);
887 903
888 /* Free outstanding skb's on TX rings */ 904 /* Free outstanding skb's on TX ring */
889 for (curr = 0; mp->tx_ring_skbs && curr < mp->tx_ring_size; curr++) { 905 mv643xx_eth_free_all_tx_descs(dev);
890 skb = mp->tx_skb[curr]; 906
891 if (skb) { 907 BUG_ON(mp->tx_used_desc_q != mp->tx_curr_desc_q);
892 mp->tx_ring_skbs -= skb_shinfo(skb)->nr_frags;
893 dev_kfree_skb(skb);
894 mp->tx_ring_skbs--;
895 }
896 }
897 if (mp->tx_ring_skbs)
898 printk("%s: Error on Tx descriptor free - could not free %d"
899 " descriptors\n", dev->name, mp->tx_ring_skbs);
900 908
901 /* Free TX ring */ 909 /* Free TX ring */
902 if (mp->tx_sram_size) 910 if (mp->tx_sram_size)
@@ -913,21 +921,21 @@ static void mv643xx_eth_free_rx_rings(struct net_device *dev)
913 int curr; 921 int curr;
914 922
915 /* Stop RX Queues */ 923 /* Stop RX Queues */
916 mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), 0x0000ff00); 924 mv643xx_eth_port_disable_rx(port_num);
917 925
918 /* Free preallocated skb's on RX rings */ 926 /* Free preallocated skb's on RX rings */
919 for (curr = 0; mp->rx_ring_skbs && curr < mp->rx_ring_size; curr++) { 927 for (curr = 0; mp->rx_desc_count && curr < mp->rx_ring_size; curr++) {
920 if (mp->rx_skb[curr]) { 928 if (mp->rx_skb[curr]) {
921 dev_kfree_skb(mp->rx_skb[curr]); 929 dev_kfree_skb(mp->rx_skb[curr]);
922 mp->rx_ring_skbs--; 930 mp->rx_desc_count--;
923 } 931 }
924 } 932 }
925 933
926 if (mp->rx_ring_skbs) 934 if (mp->rx_desc_count)
927 printk(KERN_ERR 935 printk(KERN_ERR
928 "%s: Error in freeing Rx Ring. %d skb's still" 936 "%s: Error in freeing Rx Ring. %d skb's still"
929 " stuck in RX Ring - ignoring them\n", dev->name, 937 " stuck in RX Ring - ignoring them\n", dev->name,
930 mp->rx_ring_skbs); 938 mp->rx_desc_count);
931 /* Free RX ring */ 939 /* Free RX ring */
932 if (mp->rx_sram_size) 940 if (mp->rx_sram_size)
933 iounmap(mp->p_rx_desc_area); 941 iounmap(mp->p_rx_desc_area);
@@ -952,7 +960,7 @@ static int mv643xx_eth_stop(struct net_device *dev)
952 unsigned int port_num = mp->port_num; 960 unsigned int port_num = mp->port_num;
953 961
954 /* Mask all interrupts on ethernet port */ 962 /* Mask all interrupts on ethernet port */
955 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_MASK_ALL); 963 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
956 /* wait for previous write to complete */ 964 /* wait for previous write to complete */
957 mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); 965 mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
958 966
@@ -977,30 +985,6 @@ static int mv643xx_eth_stop(struct net_device *dev)
977} 985}
978 986
979#ifdef MV643XX_NAPI 987#ifdef MV643XX_NAPI
980static void mv643xx_tx(struct net_device *dev)
981{
982 struct mv643xx_private *mp = netdev_priv(dev);
983 struct pkt_info pkt_info;
984
985 while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
986 if (pkt_info.cmd_sts & ETH_TX_FIRST_DESC)
987 dma_unmap_single(NULL, pkt_info.buf_ptr,
988 pkt_info.byte_cnt,
989 DMA_TO_DEVICE);
990 else
991 dma_unmap_page(NULL, pkt_info.buf_ptr,
992 pkt_info.byte_cnt,
993 DMA_TO_DEVICE);
994
995 if (pkt_info.return_info)
996 dev_kfree_skb_irq(pkt_info.return_info);
997 }
998
999 if (netif_queue_stopped(dev) &&
1000 mp->tx_ring_size > mp->tx_ring_skbs + MAX_DESCS_PER_SKB)
1001 netif_wake_queue(dev);
1002}
1003
1004/* 988/*
1005 * mv643xx_poll 989 * mv643xx_poll
1006 * 990 *
@@ -1014,7 +998,7 @@ static int mv643xx_poll(struct net_device *dev, int *budget)
1014 998
1015#ifdef MV643XX_TX_FAST_REFILL 999#ifdef MV643XX_TX_FAST_REFILL
1016 if (++mp->tx_clean_threshold > 5) { 1000 if (++mp->tx_clean_threshold > 5) {
1017 mv643xx_tx(dev); 1001 mv643xx_eth_free_completed_tx_descs(dev);
1018 mp->tx_clean_threshold = 0; 1002 mp->tx_clean_threshold = 0;
1019 } 1003 }
1020#endif 1004#endif
@@ -1025,7 +1009,6 @@ static int mv643xx_poll(struct net_device *dev, int *budget)
1025 if (orig_budget > dev->quota) 1009 if (orig_budget > dev->quota)
1026 orig_budget = dev->quota; 1010 orig_budget = dev->quota;
1027 work_done = mv643xx_eth_receive_queue(dev, orig_budget); 1011 work_done = mv643xx_eth_receive_queue(dev, orig_budget);
1028 mp->rx_task.func(dev);
1029 *budget -= work_done; 1012 *budget -= work_done;
1030 dev->quota -= work_done; 1013 dev->quota -= work_done;
1031 if (work_done >= orig_budget) 1014 if (work_done >= orig_budget)
@@ -1037,14 +1020,17 @@ static int mv643xx_poll(struct net_device *dev, int *budget)
1037 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); 1020 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
1038 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); 1021 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
1039 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 1022 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
1040 INT_UNMASK_ALL); 1023 ETH_INT_UNMASK_ALL);
1041 } 1024 }
1042 1025
1043 return done ? 0 : 1; 1026 return done ? 0 : 1;
1044} 1027}
1045#endif 1028#endif
1046 1029
1047/* Hardware can't handle unaligned fragments smaller than 9 bytes. 1030/**
1031 * has_tiny_unaligned_frags - check if skb has any small, unaligned fragments
1032 *
1033 * Hardware can't handle unaligned fragments smaller than 9 bytes.
1048 * This helper function detects that case. 1034 * This helper function detects that case.
1049 */ 1035 */
1050 1036
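
The hunk that follows splits transmit mapping into two cases: the skb's linear header is mapped with dma_map_single(), each page-based fragment with dma_map_page(), and only the descriptor for the last piece gets ETH_TX_ENABLE_INTERRUPT and keeps the skb pointer so the completion path can free it. The mapping half of that, reduced to the DMA calls (hypothetical helper, descriptor bookkeeping left out, NULL device as in the driver):

#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

static void foo_map_skb(struct sk_buff *skb, dma_addr_t *addrs)
{
	int i;

	/* linear part first */
	addrs[0] = dma_map_single(NULL, skb->data, skb_headlen(skb),
				  DMA_TO_DEVICE);

	/* then every paged fragment (2.6.16-era skb_frag_t layout) */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		addrs[i + 1] = dma_map_page(NULL, frag->page, frag->page_offset,
					    frag->size, DMA_TO_DEVICE);
	}
}
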
@@ -1061,223 +1047,166 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
1061 return 0; 1047 return 0;
1062} 1048}
1063 1049
1050/**
1051 * eth_alloc_tx_desc_index - return the index of the next available tx desc
1052 */
1053static int eth_alloc_tx_desc_index(struct mv643xx_private *mp)
1054{
1055 int tx_desc_curr;
1064 1056
1065/* 1057 BUG_ON(mp->tx_desc_count >= mp->tx_ring_size);
1066 * mv643xx_eth_start_xmit 1058
1067 * 1059 tx_desc_curr = mp->tx_curr_desc_q;
1068 * This function is queues a packet in the Tx descriptor for 1060 mp->tx_curr_desc_q = (tx_desc_curr + 1) % mp->tx_ring_size;
1069 * required port. 1061
1070 * 1062 BUG_ON(mp->tx_curr_desc_q == mp->tx_used_desc_q);
1071 * Input : skb - a pointer to socket buffer 1063
1072 * dev - a pointer to the required port 1064 return tx_desc_curr;
1065}
1066
1067/**
1068 * eth_tx_fill_frag_descs - fill tx hw descriptors for an skb's fragments.
1073 * 1069 *
1074 * Output : zero upon success 1070 * Ensure the data for each fragment to be transmitted is mapped properly,
1071 * then fill in descriptors in the tx hw queue.
1075 */ 1072 */
1076static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) 1073static void eth_tx_fill_frag_descs(struct mv643xx_private *mp,
1074 struct sk_buff *skb)
1077{ 1075{
1078 struct mv643xx_private *mp = netdev_priv(dev); 1076 int frag;
1079 struct net_device_stats *stats = &mp->stats; 1077 int tx_index;
1080 ETH_FUNC_RET_STATUS status; 1078 struct eth_tx_desc *desc;
1081 unsigned long flags;
1082 struct pkt_info pkt_info;
1083 1079
1084 if (netif_queue_stopped(dev)) { 1080 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
1085 printk(KERN_ERR 1081 skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
1086 "%s: Tried sending packet when interface is stopped\n", 1082
1087 dev->name); 1083 tx_index = eth_alloc_tx_desc_index(mp);
1088 return 1; 1084 desc = &mp->p_tx_desc_area[tx_index];
1085
1086 desc->cmd_sts = ETH_BUFFER_OWNED_BY_DMA;
1087 /* Last Frag enables interrupt and frees the skb */
1088 if (frag == (skb_shinfo(skb)->nr_frags - 1)) {
1089 desc->cmd_sts |= ETH_ZERO_PADDING |
1090 ETH_TX_LAST_DESC |
1091 ETH_TX_ENABLE_INTERRUPT;
1092 mp->tx_skb[tx_index] = skb;
1093 } else
1094 mp->tx_skb[tx_index] = 0;
1095
1096 desc = &mp->p_tx_desc_area[tx_index];
1097 desc->l4i_chk = 0;
1098 desc->byte_cnt = this_frag->size;
1099 desc->buf_ptr = dma_map_page(NULL, this_frag->page,
1100 this_frag->page_offset,
1101 this_frag->size,
1102 DMA_TO_DEVICE);
1089 } 1103 }
1104}
1090 1105
1091 /* This is a hard error, log it. */ 1106/**
1092 if ((mp->tx_ring_size - mp->tx_ring_skbs) <= 1107 * eth_tx_submit_descs_for_skb - submit data from an skb to the tx hw
1093 (skb_shinfo(skb)->nr_frags + 1)) { 1108 *
1094 netif_stop_queue(dev); 1109 * Ensure the data for an skb to be transmitted is mapped properly,
1095 printk(KERN_ERR 1110 * then fill in descriptors in the tx hw queue and start the hardware.
1096 "%s: Bug in mv643xx_eth - Trying to transmit when" 1111 */
1097 " queue full !\n", dev->name); 1112static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp,
1098 return 1; 1113 struct sk_buff *skb)
1099 } 1114{
1115 int tx_index;
1116 struct eth_tx_desc *desc;
1117 u32 cmd_sts;
1118 int length;
1119 int nr_frags = skb_shinfo(skb)->nr_frags;
1100 1120
1101 /* Paranoid check - this shouldn't happen */ 1121 cmd_sts = ETH_TX_FIRST_DESC | ETH_GEN_CRC | ETH_BUFFER_OWNED_BY_DMA;
1102 if (skb == NULL) {
1103 stats->tx_dropped++;
1104 printk(KERN_ERR "mv64320_eth paranoid check failed\n");
1105 return 1;
1106 }
1107 1122
1108#ifdef MV643XX_CHECKSUM_OFFLOAD_TX 1123 tx_index = eth_alloc_tx_desc_index(mp);
1109 if (has_tiny_unaligned_frags(skb)) { 1124 desc = &mp->p_tx_desc_area[tx_index];
1110 if ((skb_linearize(skb, GFP_ATOMIC) != 0)) {
1111 stats->tx_dropped++;
1112 printk(KERN_DEBUG "%s: failed to linearize tiny "
1113 "unaligned fragment\n", dev->name);
1114 return 1;
1115 }
1116 }
1117 1125
1118 spin_lock_irqsave(&mp->lock, flags); 1126 if (nr_frags) {
1127 eth_tx_fill_frag_descs(mp, skb);
1119 1128
1120 if (!skb_shinfo(skb)->nr_frags) { 1129 length = skb_headlen(skb);
1121 if (skb->ip_summed != CHECKSUM_HW) { 1130 mp->tx_skb[tx_index] = 0;
1122 /* Errata BTS #50, IHL must be 5 if no HW checksum */
1123 pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
1124 ETH_TX_FIRST_DESC |
1125 ETH_TX_LAST_DESC |
1126 5 << ETH_TX_IHL_SHIFT;
1127 pkt_info.l4i_chk = 0;
1128 } else {
1129 pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
1130 ETH_TX_FIRST_DESC |
1131 ETH_TX_LAST_DESC |
1132 ETH_GEN_TCP_UDP_CHECKSUM |
1133 ETH_GEN_IP_V_4_CHECKSUM |
1134 skb->nh.iph->ihl << ETH_TX_IHL_SHIFT;
1135 /* CPU already calculated pseudo header checksum. */
1136 if ((skb->protocol == ETH_P_IP) &&
1137 (skb->nh.iph->protocol == IPPROTO_UDP) ) {
1138 pkt_info.cmd_sts |= ETH_UDP_FRAME;
1139 pkt_info.l4i_chk = skb->h.uh->check;
1140 } else if ((skb->protocol == ETH_P_IP) &&
1141 (skb->nh.iph->protocol == IPPROTO_TCP))
1142 pkt_info.l4i_chk = skb->h.th->check;
1143 else {
1144 printk(KERN_ERR
1145 "%s: chksum proto != IPv4 TCP or UDP\n",
1146 dev->name);
1147 spin_unlock_irqrestore(&mp->lock, flags);
1148 return 1;
1149 }
1150 }
1151 pkt_info.byte_cnt = skb->len;
1152 pkt_info.buf_ptr = dma_map_single(NULL, skb->data, skb->len,
1153 DMA_TO_DEVICE);
1154 pkt_info.return_info = skb;
1155 status = eth_port_send(mp, &pkt_info);
1156 if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL))
1157 printk(KERN_ERR "%s: Error on transmitting packet\n",
1158 dev->name);
1159 stats->tx_bytes += pkt_info.byte_cnt;
1160 } else { 1131 } else {
1161 unsigned int frag; 1132 cmd_sts |= ETH_ZERO_PADDING |
1133 ETH_TX_LAST_DESC |
1134 ETH_TX_ENABLE_INTERRUPT;
1135 length = skb->len;
1136 mp->tx_skb[tx_index] = skb;
1137 }
1162 1138
1163 /* first frag which is skb header */ 1139 desc->byte_cnt = length;
1164 pkt_info.byte_cnt = skb_headlen(skb); 1140 desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
1165 pkt_info.buf_ptr = dma_map_single(NULL, skb->data,
1166 skb_headlen(skb),
1167 DMA_TO_DEVICE);
1168 pkt_info.l4i_chk = 0;
1169 pkt_info.return_info = 0;
1170
1171 if (skb->ip_summed != CHECKSUM_HW)
1172 /* Errata BTS #50, IHL must be 5 if no HW checksum */
1173 pkt_info.cmd_sts = ETH_TX_FIRST_DESC |
1174 5 << ETH_TX_IHL_SHIFT;
1175 else {
1176 pkt_info.cmd_sts = ETH_TX_FIRST_DESC |
1177 ETH_GEN_TCP_UDP_CHECKSUM |
1178 ETH_GEN_IP_V_4_CHECKSUM |
1179 skb->nh.iph->ihl << ETH_TX_IHL_SHIFT;
1180 /* CPU already calculated pseudo header checksum. */
1181 if ((skb->protocol == ETH_P_IP) &&
1182 (skb->nh.iph->protocol == IPPROTO_UDP)) {
1183 pkt_info.cmd_sts |= ETH_UDP_FRAME;
1184 pkt_info.l4i_chk = skb->h.uh->check;
1185 } else if ((skb->protocol == ETH_P_IP) &&
1186 (skb->nh.iph->protocol == IPPROTO_TCP))
1187 pkt_info.l4i_chk = skb->h.th->check;
1188 else {
1189 printk(KERN_ERR
1190 "%s: chksum proto != IPv4 TCP or UDP\n",
1191 dev->name);
1192 spin_unlock_irqrestore(&mp->lock, flags);
1193 return 1;
1194 }
1195 }
1196 1141
1197 status = eth_port_send(mp, &pkt_info); 1142 if (skb->ip_summed == CHECKSUM_HW) {
1198 if (status != ETH_OK) { 1143 BUG_ON(skb->protocol != ETH_P_IP);
1199 if ((status == ETH_ERROR)) 1144
1200 printk(KERN_ERR 1145 cmd_sts |= ETH_GEN_TCP_UDP_CHECKSUM |
1201 "%s: Error on transmitting packet\n", 1146 ETH_GEN_IP_V_4_CHECKSUM |
1202 dev->name); 1147 skb->nh.iph->ihl << ETH_TX_IHL_SHIFT;
1203 if (status == ETH_QUEUE_FULL) 1148
1204 printk("Error on Queue Full \n"); 1149 switch (skb->nh.iph->protocol) {
1205 if (status == ETH_QUEUE_LAST_RESOURCE) 1150 case IPPROTO_UDP:
1206 printk("Tx resource error \n"); 1151 cmd_sts |= ETH_UDP_FRAME;
1152 desc->l4i_chk = skb->h.uh->check;
1153 break;
1154 case IPPROTO_TCP:
1155 desc->l4i_chk = skb->h.th->check;
1156 break;
1157 default:
1158 BUG();
1207 } 1159 }
1208 stats->tx_bytes += pkt_info.byte_cnt; 1160 } else {
1209 1161 /* Errata BTS #50, IHL must be 5 if no HW checksum */
1210 /* Check for the remaining frags */ 1162 cmd_sts |= 5 << ETH_TX_IHL_SHIFT;
1211 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { 1163 desc->l4i_chk = 0;
1212 skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; 1164 }
1213 pkt_info.l4i_chk = 0x0000; 1165
1214 pkt_info.cmd_sts = 0x00000000; 1166 /* ensure all other descriptors are written before first cmd_sts */
1215 1167 wmb();
1216 /* Last Frag enables interrupt and frees the skb */ 1168 desc->cmd_sts = cmd_sts;
1217 if (frag == (skb_shinfo(skb)->nr_frags - 1)) {
1218 pkt_info.cmd_sts |= ETH_TX_ENABLE_INTERRUPT |
1219 ETH_TX_LAST_DESC;
1220 pkt_info.return_info = skb;
1221 } else {
1222 pkt_info.return_info = 0;
1223 }
1224 pkt_info.l4i_chk = 0;
1225 pkt_info.byte_cnt = this_frag->size;
1226 1169
1227 pkt_info.buf_ptr = dma_map_page(NULL, this_frag->page, 1170 /* ensure all descriptors are written before poking hardware */
1228 this_frag->page_offset, 1171 wmb();
1229 this_frag->size, 1172 mv643xx_eth_port_enable_tx(mp->port_num, ETH_TX_QUEUES_ENABLED);
1230 DMA_TO_DEVICE);
1231 1173
1232 status = eth_port_send(mp, &pkt_info); 1174 mp->tx_desc_count += nr_frags + 1;
1175}
1233 1176
1234 if (status != ETH_OK) { 1177/**
1235 if ((status == ETH_ERROR)) 1178 * mv643xx_eth_start_xmit - queue an skb to the hardware for transmission
1236 printk(KERN_ERR "%s: Error on " 1179 *
1237 "transmitting packet\n", 1180 */
1238 dev->name); 1181static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
1182{
1183 struct mv643xx_private *mp = netdev_priv(dev);
1184 struct net_device_stats *stats = &mp->stats;
1185 unsigned long flags;
1239 1186
1240 if (status == ETH_QUEUE_LAST_RESOURCE) 1187 BUG_ON(netif_queue_stopped(dev));
1241 printk("Tx resource error \n"); 1188 BUG_ON(skb == NULL);
1189 BUG_ON(mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB);
1242 1190
1243 if (status == ETH_QUEUE_FULL) 1191 if (has_tiny_unaligned_frags(skb)) {
1244 printk("Queue is full \n"); 1192 if ((skb_linearize(skb, GFP_ATOMIC) != 0)) {
1245 } 1193 stats->tx_dropped++;
1246 stats->tx_bytes += pkt_info.byte_cnt; 1194 printk(KERN_DEBUG "%s: failed to linearize tiny "
1195 "unaligned fragment\n", dev->name);
1196 return 1;
1247 } 1197 }
1248 } 1198 }
1249#else
1250 spin_lock_irqsave(&mp->lock, flags);
1251 1199
1252 pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | ETH_TX_FIRST_DESC | 1200 spin_lock_irqsave(&mp->lock, flags);
1253 ETH_TX_LAST_DESC;
1254 pkt_info.l4i_chk = 0;
1255 pkt_info.byte_cnt = skb->len;
1256 pkt_info.buf_ptr = dma_map_single(NULL, skb->data, skb->len,
1257 DMA_TO_DEVICE);
1258 pkt_info.return_info = skb;
1259 status = eth_port_send(mp, &pkt_info);
1260 if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL))
1261 printk(KERN_ERR "%s: Error on transmitting packet\n",
1262 dev->name);
1263 stats->tx_bytes += pkt_info.byte_cnt;
1264#endif
1265
1266 /* Check if TX queue can handle another skb. If not, then
1267 * signal higher layers to stop requesting TX
1268 */
1269 if (mp->tx_ring_size <= (mp->tx_ring_skbs + MAX_DESCS_PER_SKB))
1270 /*
1271 * Stop getting skb's from upper layers.
1272 * Getting skb's from upper layers will be enabled again after
1273 * packets are released.
1274 */
1275 netif_stop_queue(dev);
1276 1201
1277 /* Update statistics and start of transmittion time */ 1202 eth_tx_submit_descs_for_skb(mp, skb);
1203 stats->tx_bytes = skb->len;
1278 stats->tx_packets++; 1204 stats->tx_packets++;
1279 dev->trans_start = jiffies; 1205 dev->trans_start = jiffies;
1280 1206
1207 if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB)
1208 netif_stop_queue(dev);
1209
1281 spin_unlock_irqrestore(&mp->lock, flags); 1210 spin_unlock_irqrestore(&mp->lock, flags);
1282 1211
1283 return 0; /* success */ 1212 return 0; /* success */
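
The rewritten transmit path in the hunk above replaces the old eth_port_send() machinery with plain ring arithmetic: eth_alloc_tx_desc_index() hands out mp->tx_curr_desc_q and advances it modulo the ring size, eth_tx_submit_descs_for_skb() adds nr_frags + 1 to mp->tx_desc_count, and mv643xx_eth_start_xmit() stops the queue once fewer than MAX_DESCS_PER_SKB slots remain. The following stand-alone C sketch models only that arithmetic; the ring size, the MAX_DESCS_PER_SKB value and the helper names are illustrative and are not taken from the driver.

    /* Minimal user-space model of the tx descriptor ring bookkeeping. */
    #include <assert.h>
    #include <stdio.h>

    #define RING_SIZE         16      /* stand-in for mp->tx_ring_size */
    #define MAX_DESCS_PER_SKB 4       /* head + a few fragments (illustrative) */

    struct ring {
        int curr;   /* next descriptor handed to the hardware */
        int used;   /* next descriptor to be reclaimed */
        int count;  /* descriptors currently owned by the hardware */
    };

    /* Mirrors eth_alloc_tx_desc_index(): take the current slot, advance with wrap. */
    static int alloc_desc(struct ring *r)
    {
        int idx = r->curr;

        assert(r->count < RING_SIZE);
        r->curr = (idx + 1) % RING_SIZE;
        r->count++;                   /* the driver adds nr_frags + 1 in one go */
        return idx;
    }

    /* Mirrors the "too few free descriptors" test at the end of start_xmit(). */
    static int must_stop_queue(const struct ring *r)
    {
        return RING_SIZE - r->count < MAX_DESCS_PER_SKB;
    }

    int main(void)
    {
        struct ring r = { 0, 0, 0 };
        int i;

        for (i = 0; i < RING_SIZE; i++) {
            if (must_stop_queue(&r)) {
                printf("stop queue at count=%d\n", r.count);
                break;
            }
            printf("skb %d -> desc %d\n", i, alloc_desc(&r));
        }
        return 0;
    }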
@@ -1306,16 +1235,45 @@ static void mv643xx_netpoll(struct net_device *netdev)
1306 struct mv643xx_private *mp = netdev_priv(netdev); 1235 struct mv643xx_private *mp = netdev_priv(netdev);
1307 int port_num = mp->port_num; 1236 int port_num = mp->port_num;
1308 1237
1309 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_MASK_ALL); 1238 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
1310 /* wait for previous write to complete */ 1239 /* wait for previous write to complete */
1311 mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); 1240 mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
1312 1241
1313 mv643xx_eth_int_handler(netdev->irq, netdev, NULL); 1242 mv643xx_eth_int_handler(netdev->irq, netdev, NULL);
1314 1243
1315 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_UNMASK_ALL); 1244 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
1316} 1245}
1317#endif 1246#endif
1318 1247
1248static void mv643xx_init_ethtool_cmd(struct net_device *dev, int phy_address,
1249 int speed, int duplex,
1250 struct ethtool_cmd *cmd)
1251{
1252 struct mv643xx_private *mp = netdev_priv(dev);
1253
1254 memset(cmd, 0, sizeof(*cmd));
1255
1256 cmd->port = PORT_MII;
1257 cmd->transceiver = XCVR_INTERNAL;
1258 cmd->phy_address = phy_address;
1259
1260 if (speed == 0) {
1261 cmd->autoneg = AUTONEG_ENABLE;
1262 /* mii lib checks, but doesn't use speed on AUTONEG_ENABLE */
1263 cmd->speed = SPEED_100;
1264 cmd->advertising = ADVERTISED_10baseT_Half |
1265 ADVERTISED_10baseT_Full |
1266 ADVERTISED_100baseT_Half |
1267 ADVERTISED_100baseT_Full;
1268 if (mp->mii.supports_gmii)
1269 cmd->advertising |= ADVERTISED_1000baseT_Full;
1270 } else {
1271 cmd->autoneg = AUTONEG_DISABLE;
1272 cmd->speed = speed;
1273 cmd->duplex = duplex;
1274 }
1275}
1276
1319/*/ 1277/*/
1320 * mv643xx_eth_probe 1278 * mv643xx_eth_probe
1321 * 1279 *
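
The new mv643xx_init_ethtool_cmd() above treats a platform-supplied speed of 0 as "use autonegotiation" and otherwise forces the given speed/duplex pair. Below is a minimal sketch of that decision using stand-in types and constants rather than the kernel's struct ethtool_cmd; the names and bit values are illustrative only.

    #include <stdio.h>

    enum { AUTONEG_DISABLE = 0, AUTONEG_ENABLE = 1 };
    enum { ADV_10_HALF = 1 << 0, ADV_10_FULL = 1 << 1,
           ADV_100_HALF = 1 << 2, ADV_100_FULL = 1 << 3,
           ADV_1000_FULL = 1 << 4 };

    struct cmd { int autoneg, speed, duplex; unsigned advertising; };

    /* speed == 0 means "autonegotiate", mirroring the driver's convention. */
    static void init_cmd(struct cmd *c, int speed, int duplex, int has_gmii)
    {
        if (speed == 0) {
            c->autoneg = AUTONEG_ENABLE;
            c->advertising = ADV_10_HALF | ADV_10_FULL |
                             ADV_100_HALF | ADV_100_FULL;
            if (has_gmii)
                c->advertising |= ADV_1000_FULL;
        } else {
            c->autoneg = AUTONEG_DISABLE;
            c->speed = speed;
            c->duplex = duplex;
        }
    }

    int main(void)
    {
        struct cmd c = { 0 };

        init_cmd(&c, 0, 0, 1);
        printf("autoneg=%d advertising=0x%x\n", c.autoneg, c.advertising);
        init_cmd(&c, 100, 1, 0);
        printf("autoneg=%d speed=%d duplex=%d\n", c.autoneg, c.speed, c.duplex);
        return 0;
    }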
@@ -1336,6 +1294,9 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
1336 u8 *p; 1294 u8 *p;
1337 struct resource *res; 1295 struct resource *res;
1338 int err; 1296 int err;
1297 struct ethtool_cmd cmd;
1298 int duplex = DUPLEX_HALF;
1299 int speed = 0; /* default to auto-negotiation */
1339 1300
1340 dev = alloc_etherdev(sizeof(struct mv643xx_private)); 1301 dev = alloc_etherdev(sizeof(struct mv643xx_private));
1341 if (!dev) 1302 if (!dev)
@@ -1373,6 +1334,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
1373 dev->tx_queue_len = mp->tx_ring_size; 1334 dev->tx_queue_len = mp->tx_ring_size;
1374 dev->base_addr = 0; 1335 dev->base_addr = 0;
1375 dev->change_mtu = mv643xx_eth_change_mtu; 1336 dev->change_mtu = mv643xx_eth_change_mtu;
1337 dev->do_ioctl = mv643xx_eth_do_ioctl;
1376 SET_ETHTOOL_OPS(dev, &mv643xx_ethtool_ops); 1338 SET_ETHTOOL_OPS(dev, &mv643xx_ethtool_ops);
1377 1339
1378#ifdef MV643XX_CHECKSUM_OFFLOAD_TX 1340#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
@@ -1393,33 +1355,17 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
1393 1355
1394 /* set default config values */ 1356 /* set default config values */
1395 eth_port_uc_addr_get(dev, dev->dev_addr); 1357 eth_port_uc_addr_get(dev, dev->dev_addr);
1396 mp->port_config = MV643XX_ETH_PORT_CONFIG_DEFAULT_VALUE;
1397 mp->port_config_extend = MV643XX_ETH_PORT_CONFIG_EXTEND_DEFAULT_VALUE;
1398 mp->port_sdma_config = MV643XX_ETH_PORT_SDMA_CONFIG_DEFAULT_VALUE;
1399 mp->port_serial_control = MV643XX_ETH_PORT_SERIAL_CONTROL_DEFAULT_VALUE;
1400 mp->rx_ring_size = MV643XX_ETH_PORT_DEFAULT_RECEIVE_QUEUE_SIZE; 1358 mp->rx_ring_size = MV643XX_ETH_PORT_DEFAULT_RECEIVE_QUEUE_SIZE;
1401 mp->tx_ring_size = MV643XX_ETH_PORT_DEFAULT_TRANSMIT_QUEUE_SIZE; 1359 mp->tx_ring_size = MV643XX_ETH_PORT_DEFAULT_TRANSMIT_QUEUE_SIZE;
1402 1360
1403 pd = pdev->dev.platform_data; 1361 pd = pdev->dev.platform_data;
1404 if (pd) { 1362 if (pd) {
1405 if (pd->mac_addr != NULL) 1363 if (pd->mac_addr)
1406 memcpy(dev->dev_addr, pd->mac_addr, 6); 1364 memcpy(dev->dev_addr, pd->mac_addr, 6);
1407 1365
1408 if (pd->phy_addr || pd->force_phy_addr) 1366 if (pd->phy_addr || pd->force_phy_addr)
1409 ethernet_phy_set(port_num, pd->phy_addr); 1367 ethernet_phy_set(port_num, pd->phy_addr);
1410 1368
1411 if (pd->port_config || pd->force_port_config)
1412 mp->port_config = pd->port_config;
1413
1414 if (pd->port_config_extend || pd->force_port_config_extend)
1415 mp->port_config_extend = pd->port_config_extend;
1416
1417 if (pd->port_sdma_config || pd->force_port_sdma_config)
1418 mp->port_sdma_config = pd->port_sdma_config;
1419
1420 if (pd->port_serial_control || pd->force_port_serial_control)
1421 mp->port_serial_control = pd->port_serial_control;
1422
1423 if (pd->rx_queue_size) 1369 if (pd->rx_queue_size)
1424 mp->rx_ring_size = pd->rx_queue_size; 1370 mp->rx_ring_size = pd->rx_queue_size;
1425 1371
@@ -1435,16 +1381,33 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
1435 mp->rx_sram_size = pd->rx_sram_size; 1381 mp->rx_sram_size = pd->rx_sram_size;
1436 mp->rx_sram_addr = pd->rx_sram_addr; 1382 mp->rx_sram_addr = pd->rx_sram_addr;
1437 } 1383 }
1384
1385 duplex = pd->duplex;
1386 speed = pd->speed;
1438 } 1387 }
1439 1388
1389 /* Hook up MII support for ethtool */
1390 mp->mii.dev = dev;
1391 mp->mii.mdio_read = mv643xx_mdio_read;
1392 mp->mii.mdio_write = mv643xx_mdio_write;
1393 mp->mii.phy_id = ethernet_phy_get(port_num);
1394 mp->mii.phy_id_mask = 0x3f;
1395 mp->mii.reg_num_mask = 0x1f;
1396
1440 err = ethernet_phy_detect(port_num); 1397 err = ethernet_phy_detect(port_num);
1441 if (err) { 1398 if (err) {
1442 pr_debug("MV643xx ethernet port %d: " 1399 pr_debug("MV643xx ethernet port %d: "
1443 "No PHY detected at addr %d\n", 1400 "No PHY detected at addr %d\n",
1444 port_num, ethernet_phy_get(port_num)); 1401 port_num, ethernet_phy_get(port_num));
1445 return err; 1402 goto out;
1446 } 1403 }
1447 1404
1405 ethernet_phy_reset(port_num);
1406 mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii);
1407 mv643xx_init_ethtool_cmd(dev, mp->mii.phy_id, speed, duplex, &cmd);
1408 mv643xx_eth_update_pscr(dev, &cmd);
1409 mv643xx_set_settings(dev, &cmd);
1410
1448 err = register_netdev(dev); 1411 err = register_netdev(dev);
1449 if (err) 1412 if (err)
1450 goto out; 1413 goto out;
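
The probe changes above delegate link handling to the kernel's generic MII library: mp->mii is filled with a device back-pointer, mdio_read/mdio_write callbacks, the PHY id and the register masks, and helpers such as mii_check_gmii_support() and (later in this patch) mii_link_ok() operate on it. The user-space sketch below shows only that callback-wiring pattern with a home-grown structure; the real driver uses struct mii_if_info from <linux/mii.h>, and the register values here are made up.

    #include <stdio.h>

    /* Toy analogue of struct mii_if_info: just the pieces wired up in probe(). */
    struct mii_if {
        void *dev;                                   /* back-pointer (net_device) */
        int  (*mdio_read)(void *dev, int phy, int reg);
        void (*mdio_write)(void *dev, int phy, int reg, int val);
        int  phy_id;
    };

    /* Fake PHY register file standing in for the SMI accessors. */
    static int phy_regs[32] = { [1] = 0x0004 };      /* BMSR with link-up bit set */

    static int fake_read(void *dev, int phy, int reg)
    {
        (void)dev; (void)phy;
        return phy_regs[reg & 0x1f];
    }

    static void fake_write(void *dev, int phy, int reg, int val)
    {
        (void)dev; (void)phy;
        phy_regs[reg & 0x1f] = val;
    }

    /* Crude stand-in for mii_link_ok(): BMSR (reg 1) bit 2 is link status. */
    static int link_ok(struct mii_if *mii)
    {
        return (mii->mdio_read(mii->dev, mii->phy_id, 1) & 0x0004) != 0;
    }

    int main(void)
    {
        struct mii_if mii = {
            .dev = NULL, .mdio_read = fake_read,
            .mdio_write = fake_write, .phy_id = 8,
        };

        printf("link %s\n", link_ok(&mii) ? "up" : "down");
        return 0;
    }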
@@ -1689,26 +1652,9 @@ MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
1689 * to the Rx descriptor ring to enable the reuse of this source. 1652 * to the Rx descriptor ring to enable the reuse of this source.
1690 * Return Rx resource is done using the eth_rx_return_buff API. 1653 * Return Rx resource is done using the eth_rx_return_buff API.
1691 * 1654 *
1692 * Transmit operation:
1693 * The eth_port_send API supports Scatter-Gather which enables to
1694 * send a packet spanned over multiple buffers. This means that
1695 * for each packet info structure given by the user and put into
1696 * the Tx descriptors ring, will be transmitted only if the 'LAST'
1697 * bit will be set in the packet info command status field. This
1698 * API also consider restriction regarding buffer alignments and
1699 * sizes.
1700 * The user must return a Tx resource after ensuring the buffer
1701 * has been transmitted to enable the Tx ring indexes to update.
1702 *
1703 * BOARD LAYOUT
1704 * This device is on-board. No jumper diagram is necessary.
1705 *
1706 * EXTERNAL INTERFACE
1707 *
1708 * Prior to calling the initialization routine eth_port_init() the user 1655 * Prior to calling the initialization routine eth_port_init() the user
1709 * must set the following fields under mv643xx_private struct: 1656 * must set the following fields under mv643xx_private struct:
1710 * port_num User Ethernet port number. 1657 * port_num User Ethernet port number.
1711 * port_mac_addr[6] User defined port MAC address.
1712 * port_config User port configuration value. 1658 * port_config User port configuration value.
1713 * port_config_extend User port config extend value. 1659 * port_config_extend User port config extend value.
1714 * port_sdma_config User port SDMA config value. 1660 * port_sdma_config User port SDMA config value.
@@ -1725,20 +1671,12 @@ MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
1725 * return_info Tx/Rx user resource return information. 1671 * return_info Tx/Rx user resource return information.
1726 */ 1672 */
1727 1673
1728/* defines */
1729/* SDMA command macros */
1730#define ETH_ENABLE_TX_QUEUE(eth_port) \
1731 mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port), 1)
1732
1733/* locals */
1734
1735/* PHY routines */ 1674/* PHY routines */
1736static int ethernet_phy_get(unsigned int eth_port_num); 1675static int ethernet_phy_get(unsigned int eth_port_num);
1737static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr); 1676static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr);
1738 1677
1739/* Ethernet Port routines */ 1678/* Ethernet Port routines */
1740static int eth_port_uc_addr(unsigned int eth_port_num, unsigned char uc_nibble, 1679static void eth_port_set_filter_table_entry(int table, unsigned char entry);
1741 int option);
1742 1680
1743/* 1681/*
1744 * eth_port_init - Initialize the Ethernet port driver 1682 * eth_port_init - Initialize the Ethernet port driver
@@ -1766,17 +1704,11 @@ static int eth_port_uc_addr(unsigned int eth_port_num, unsigned char uc_nibble,
1766 */ 1704 */
1767static void eth_port_init(struct mv643xx_private *mp) 1705static void eth_port_init(struct mv643xx_private *mp)
1768{ 1706{
1769 mp->port_rx_queue_command = 0;
1770 mp->port_tx_queue_command = 0;
1771
1772 mp->rx_resource_err = 0; 1707 mp->rx_resource_err = 0;
1773 mp->tx_resource_err = 0;
1774 1708
1775 eth_port_reset(mp->port_num); 1709 eth_port_reset(mp->port_num);
1776 1710
1777 eth_port_init_mac_tables(mp->port_num); 1711 eth_port_init_mac_tables(mp->port_num);
1778
1779 ethernet_phy_reset(mp->port_num);
1780} 1712}
1781 1713
1782/* 1714/*
@@ -1798,7 +1730,7 @@ static void eth_port_init(struct mv643xx_private *mp)
1798 * and ether_init_rx_desc_ring for Rx queues). 1730 * and ether_init_rx_desc_ring for Rx queues).
1799 * 1731 *
1800 * INPUT: 1732 * INPUT:
1801 * struct mv643xx_private *mp Ethernet port control struct 1733 * dev - a pointer to the required interface
1802 * 1734 *
1803 * OUTPUT: 1735 * OUTPUT:
1804 * Ethernet port is ready to receive and transmit. 1736 * Ethernet port is ready to receive and transmit.
@@ -1806,10 +1738,13 @@ static void eth_port_init(struct mv643xx_private *mp)
1806 * RETURN: 1738 * RETURN:
1807 * None. 1739 * None.
1808 */ 1740 */
1809static void eth_port_start(struct mv643xx_private *mp) 1741static void eth_port_start(struct net_device *dev)
1810{ 1742{
1743 struct mv643xx_private *mp = netdev_priv(dev);
1811 unsigned int port_num = mp->port_num; 1744 unsigned int port_num = mp->port_num;
1812 int tx_curr_desc, rx_curr_desc; 1745 int tx_curr_desc, rx_curr_desc;
1746 u32 pscr;
1747 struct ethtool_cmd ethtool_cmd;
1813 1748
1814 /* Assignment of Tx CTRP of given queue */ 1749 /* Assignment of Tx CTRP of given queue */
1815 tx_curr_desc = mp->tx_curr_desc_q; 1750 tx_curr_desc = mp->tx_curr_desc_q;
@@ -1822,37 +1757,45 @@ static void eth_port_start(struct mv643xx_private *mp)
1822 (u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc)); 1757 (u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc));
1823 1758
1824 /* Add the assigned Ethernet address to the port's address table */ 1759 /* Add the assigned Ethernet address to the port's address table */
1825 eth_port_uc_addr_set(port_num, mp->port_mac_addr); 1760 eth_port_uc_addr_set(port_num, dev->dev_addr);
1826 1761
1827 /* Assign port configuration and command. */ 1762 /* Assign port configuration and command. */
1828 mv_write(MV643XX_ETH_PORT_CONFIG_REG(port_num), mp->port_config); 1763 mv_write(MV643XX_ETH_PORT_CONFIG_REG(port_num),
1764 MV643XX_ETH_PORT_CONFIG_DEFAULT_VALUE);
1829 1765
1830 mv_write(MV643XX_ETH_PORT_CONFIG_EXTEND_REG(port_num), 1766 mv_write(MV643XX_ETH_PORT_CONFIG_EXTEND_REG(port_num),
1831 mp->port_config_extend); 1767 MV643XX_ETH_PORT_CONFIG_EXTEND_DEFAULT_VALUE);
1832 1768
1769 pscr = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num));
1833 1770
1834 /* Increase the Rx side buffer size if supporting GigE */ 1771 pscr &= ~(MV643XX_ETH_SERIAL_PORT_ENABLE | MV643XX_ETH_FORCE_LINK_PASS);
1835 if (mp->port_serial_control & MV643XX_ETH_SET_GMII_SPEED_TO_1000) 1772 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr);
1836 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), 1773
1837 (mp->port_serial_control & 0xfff1ffff) | (0x5 << 17)); 1774 pscr |= MV643XX_ETH_DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
1838 else 1775 MV643XX_ETH_DISABLE_AUTO_NEG_SPEED_GMII |
1839 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), 1776 MV643XX_ETH_DISABLE_AUTO_NEG_FOR_DUPLX |
1840 mp->port_serial_control); 1777 MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL |
1778 MV643XX_ETH_SERIAL_PORT_CONTROL_RESERVED;
1841 1779
1842 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), 1780 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr);
1843 mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)) | 1781
1844 MV643XX_ETH_SERIAL_PORT_ENABLE); 1782 pscr |= MV643XX_ETH_SERIAL_PORT_ENABLE;
1783 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr);
1845 1784
1846 /* Assign port SDMA configuration */ 1785 /* Assign port SDMA configuration */
1847 mv_write(MV643XX_ETH_SDMA_CONFIG_REG(port_num), 1786 mv_write(MV643XX_ETH_SDMA_CONFIG_REG(port_num),
1848 mp->port_sdma_config); 1787 MV643XX_ETH_PORT_SDMA_CONFIG_DEFAULT_VALUE);
1849 1788
1850 /* Enable port Rx. */ 1789 /* Enable port Rx. */
1851 mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), 1790 mv643xx_eth_port_enable_rx(port_num, ETH_RX_QUEUES_ENABLED);
1852 mp->port_rx_queue_command);
1853 1791
1854 /* Disable port bandwidth limits by clearing MTU register */ 1792 /* Disable port bandwidth limits by clearing MTU register */
1855 mv_write(MV643XX_ETH_MAXIMUM_TRANSMIT_UNIT(port_num), 0); 1793 mv_write(MV643XX_ETH_MAXIMUM_TRANSMIT_UNIT(port_num), 0);
1794
1795 /* save phy settings across reset */
1796 mv643xx_get_settings(dev, &ethtool_cmd);
1797 ethernet_phy_reset(mp->port_num);
1798 mv643xx_set_settings(dev, &ethtool_cmd);
1856} 1799}
1857 1800
1858/* 1801/*
@@ -1866,8 +1809,9 @@ static void eth_port_start(struct mv643xx_private *mp)
1866 * char * p_addr Address to be set 1809 * char * p_addr Address to be set
1867 * 1810 *
1868 * OUTPUT: 1811 * OUTPUT:
1869 * Set MAC address low and high registers. also calls eth_port_uc_addr() 1812 * Set MAC address low and high registers. also calls
1870 * To set the unicast table with the proper information. 1813 * eth_port_set_filter_table_entry() to set the unicast
1814 * table with the proper information.
1871 * 1815 *
1872 * RETURN: 1816 * RETURN:
1873 * N/A. 1817 * N/A.
@@ -1878,6 +1822,7 @@ static void eth_port_uc_addr_set(unsigned int eth_port_num,
1878{ 1822{
1879 unsigned int mac_h; 1823 unsigned int mac_h;
1880 unsigned int mac_l; 1824 unsigned int mac_l;
1825 int table;
1881 1826
1882 mac_l = (p_addr[4] << 8) | (p_addr[5]); 1827 mac_l = (p_addr[4] << 8) | (p_addr[5]);
1883 mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) | 1828 mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
@@ -1887,9 +1832,8 @@ static void eth_port_uc_addr_set(unsigned int eth_port_num,
1887 mv_write(MV643XX_ETH_MAC_ADDR_HIGH(eth_port_num), mac_h); 1832 mv_write(MV643XX_ETH_MAC_ADDR_HIGH(eth_port_num), mac_h);
1888 1833
1889 /* Accept frames of this address */ 1834 /* Accept frames of this address */
1890 eth_port_uc_addr(eth_port_num, p_addr[5], ACCEPT_MAC_ADDR); 1835 table = MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE(eth_port_num);
1891 1836 eth_port_set_filter_table_entry(table, p_addr[5] & 0x0f);
1892 return;
1893} 1837}
1894 1838
1895/* 1839/*
@@ -1928,72 +1872,6 @@ static void eth_port_uc_addr_get(struct net_device *dev, unsigned char *p_addr)
1928} 1872}
1929 1873
1930/* 1874/*
1931 * eth_port_uc_addr - This function Set the port unicast address table
1932 *
1933 * DESCRIPTION:
1934 * This function locates the proper entry in the Unicast table for the
1935 * specified MAC nibble and sets its properties according to function
1936 * parameters.
1937 *
1938 * INPUT:
1939 * unsigned int eth_port_num Port number.
1940 * unsigned char uc_nibble Unicast MAC Address last nibble.
1941 * int option 0 = Add, 1 = remove address.
1942 *
1943 * OUTPUT:
1944 * This function add/removes MAC addresses from the port unicast address
1945 * table.
1946 *
1947 * RETURN:
1948 * true is output succeeded.
1949 * false if option parameter is invalid.
1950 *
1951 */
1952static int eth_port_uc_addr(unsigned int eth_port_num, unsigned char uc_nibble,
1953 int option)
1954{
1955 unsigned int unicast_reg;
1956 unsigned int tbl_offset;
1957 unsigned int reg_offset;
1958
1959 /* Locate the Unicast table entry */
1960 uc_nibble = (0xf & uc_nibble);
1961 tbl_offset = (uc_nibble / 4) * 4; /* Register offset from unicast table base */
1962 reg_offset = uc_nibble % 4; /* Entry offset within the above register */
1963
1964 switch (option) {
1965 case REJECT_MAC_ADDR:
1966 /* Clear accepts frame bit at given unicast DA table entry */
1967 unicast_reg = mv_read((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE
1968 (eth_port_num) + tbl_offset));
1969
1970 unicast_reg &= (0x0E << (8 * reg_offset));
1971
1972 mv_write((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE
1973 (eth_port_num) + tbl_offset), unicast_reg);
1974 break;
1975
1976 case ACCEPT_MAC_ADDR:
1977 /* Set accepts frame bit at unicast DA filter table entry */
1978 unicast_reg =
1979 mv_read((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE
1980 (eth_port_num) + tbl_offset));
1981
1982 unicast_reg |= (0x01 << (8 * reg_offset));
1983
1984 mv_write((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE
1985 (eth_port_num) + tbl_offset), unicast_reg);
1986
1987 break;
1988
1989 default:
1990 return 0;
1991 }
1992
1993 return 1;
1994}
1995
1996/*
1997 * The entries in each table are indexed by a hash of a packet's MAC 1875 * The entries in each table are indexed by a hash of a packet's MAC
1998 * address. One bit in each entry determines whether the packet is 1876 * address. One bit in each entry determines whether the packet is
1999 * accepted. There are 4 entries (each 8 bits wide) in each register 1877 * accepted. There are 4 entries (each 8 bits wide) in each register
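
The removed eth_port_uc_addr() documents how the unicast filter table is addressed: the low nibble of the MAC address selects one of 16 one-byte entries packed four per 32-bit register, so the driver computed a byte offset of (nibble / 4) * 4 and set the "accept" bit at position 8 * (nibble % 4). The replacement eth_port_set_filter_table_entry() keeps the same layout (the multicast tables, per the comment above, are indexed by a hash instead). A stand-alone sketch of that offset math follows; it indexes a plain array rather than using the driver's byte offset, and the register contents are illustrative.

    #include <stdio.h>

    /* Model of the 16-entry DA filter unicast table: 4 registers, 4 entries each. */
    static unsigned int filter_regs[4];

    /* Set the "accept frame" bit for the entry selected by a MAC nibble. */
    static void set_filter_entry(unsigned char nibble)
    {
        unsigned int reg_index  = (nibble & 0x0f) / 4;   /* which register        */
        unsigned int reg_offset = (nibble & 0x0f) % 4;   /* which byte within it  */

        filter_regs[reg_index] |= 1u << (8 * reg_offset);
    }

    int main(void)
    {
        unsigned char last_octet = 0x5e;                 /* nibble 0xe -> reg 3, byte 2 */
        int i;

        set_filter_entry(last_octet & 0x0f);
        for (i = 0; i < 4; i++)
            printf("reg %d: 0x%08x\n", i, filter_regs[i]);
        return 0;
    }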
@@ -2205,8 +2083,8 @@ static void eth_port_init_mac_tables(unsigned int eth_port_num)
2205 2083
2206 /* Clear DA filter unicast table (Ex_dFUT) */ 2084 /* Clear DA filter unicast table (Ex_dFUT) */
2207 for (table_index = 0; table_index <= 0xC; table_index += 4) 2085 for (table_index = 0; table_index <= 0xC; table_index += 4)
2208 mv_write((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE 2086 mv_write(MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE
2209 (eth_port_num) + table_index), 0); 2087 (eth_port_num) + table_index, 0);
2210 2088
2211 for (table_index = 0; table_index <= 0xFC; table_index += 4) { 2089 for (table_index = 0; table_index <= 0xFC; table_index += 4) {
2212 /* Clear DA filter special multicast table (Ex_dFSMT) */ 2090 /* Clear DA filter special multicast table (Ex_dFSMT) */
@@ -2389,6 +2267,73 @@ static void ethernet_phy_reset(unsigned int eth_port_num)
2389 eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data); 2267 eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data);
2390 phy_reg_data |= 0x8000; /* Set bit 15 to reset the PHY */ 2268 phy_reg_data |= 0x8000; /* Set bit 15 to reset the PHY */
2391 eth_port_write_smi_reg(eth_port_num, 0, phy_reg_data); 2269 eth_port_write_smi_reg(eth_port_num, 0, phy_reg_data);
2270
2271 /* wait for PHY to come out of reset */
2272 do {
2273 udelay(1);
2274 eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data);
2275 } while (phy_reg_data & 0x8000);
2276}
2277
2278static void mv643xx_eth_port_enable_tx(unsigned int port_num,
2279 unsigned int queues)
2280{
2281 mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), queues);
2282}
2283
2284static void mv643xx_eth_port_enable_rx(unsigned int port_num,
2285 unsigned int queues)
2286{
2287 mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), queues);
2288}
2289
2290static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num)
2291{
2292 u32 queues;
2293
2294 /* Stop Tx port activity. Check port Tx activity. */
2295 queues = mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num))
2296 & 0xFF;
2297 if (queues) {
2298 /* Issue stop command for active queues only */
2299 mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num),
2300 (queues << 8));
2301
2302 /* Wait for all Tx activity to terminate. */
2303 /* Check port cause register that all Tx queues are stopped */
2304 while (mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num))
2305 & 0xFF)
2306 udelay(PHY_WAIT_MICRO_SECONDS);
2307
2308 /* Wait for Tx FIFO to empty */
2309 while (mv_read(MV643XX_ETH_PORT_STATUS_REG(port_num)) &
2310 ETH_PORT_TX_FIFO_EMPTY)
2311 udelay(PHY_WAIT_MICRO_SECONDS);
2312 }
2313
2314 return queues;
2315}
2316
2317static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num)
2318{
2319 u32 queues;
2320
2321 /* Stop Rx port activity. Check port Rx activity. */
2322 queues = mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num))
2323 & 0xFF;
2324 if (queues) {
2325 /* Issue stop command for active queues only */
2326 mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num),
2327 (queues << 8));
2328
2329 /* Wait for all Rx activity to terminate. */
2330 /* Check port cause register that all Rx queues are stopped */
2331 while (mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num))
2332 & 0xFF)
2333 udelay(PHY_WAIT_MICRO_SECONDS);
2334 }
2335
2336 return queues;
2392} 2337}
2393 2338
2394/* 2339/*
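
Several of the helpers added above share one idiom: issue a command, then busy-wait with udelay() until the hardware reports completion — the PHY reset bit clearing, the per-queue stop commands and the Tx FIFO drain all follow it. The sketch below illustrates a bounded variant of that polled wait against a simulated status register; the timeout budget and the fake register are made up for illustration, and the driver's loops shown above poll without an explicit bound.

    #include <stdio.h>

    #define POLL_INTERVAL_US 10      /* cf. PHY_WAIT_MICRO_SECONDS */
    #define POLL_ITERATIONS  1000    /* cf. PHY_WAIT_ITERATIONS */

    /* Simulated status register: the "busy" bit clears after a few reads. */
    static unsigned int fake_status = 0x8000;
    static int reads;

    static unsigned int read_status(void)
    {
        if (++reads > 3)
            fake_status &= ~0x8000u;
        return fake_status;
    }

    static void udelay_stub(unsigned int us)
    {
        (void)us;                    /* the real driver spins with udelay(us) */
    }

    /* Wait for a bit to clear, giving up after a fixed budget. */
    static int wait_bit_clear(unsigned int mask)
    {
        int i;

        for (i = 0; i < POLL_ITERATIONS; i++) {
            if (!(read_status() & mask))
                return 0;
            udelay_stub(POLL_INTERVAL_US);
        }
        return -1;                   /* timed out */
    }

    int main(void)
    {
        printf("wait for PHY reset: %s after %d reads\n",
               wait_bit_clear(0x8000) ? "timeout" : "done", reads);
        return 0;
    }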
@@ -2413,70 +2358,21 @@ static void eth_port_reset(unsigned int port_num)
2413{ 2358{
2414 unsigned int reg_data; 2359 unsigned int reg_data;
2415 2360
2416 /* Stop Tx port activity. Check port Tx activity. */ 2361 mv643xx_eth_port_disable_tx(port_num);
2417 reg_data = mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num)); 2362 mv643xx_eth_port_disable_rx(port_num);
2418
2419 if (reg_data & 0xFF) {
2420 /* Issue stop command for active channels only */
2421 mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num),
2422 (reg_data << 8));
2423
2424 /* Wait for all Tx activity to terminate. */
2425 /* Check port cause register that all Tx queues are stopped */
2426 while (mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num))
2427 & 0xFF)
2428 udelay(10);
2429 }
2430
2431 /* Stop Rx port activity. Check port Rx activity. */
2432 reg_data = mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num));
2433
2434 if (reg_data & 0xFF) {
2435 /* Issue stop command for active channels only */
2436 mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num),
2437 (reg_data << 8));
2438
2439 /* Wait for all Rx activity to terminate. */
2440 /* Check port cause register that all Rx queues are stopped */
2441 while (mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num))
2442 & 0xFF)
2443 udelay(10);
2444 }
2445 2363
2446 /* Clear all MIB counters */ 2364 /* Clear all MIB counters */
2447 eth_clear_mib_counters(port_num); 2365 eth_clear_mib_counters(port_num);
2448 2366
2449 /* Reset the Enable bit in the Configuration Register */ 2367 /* Reset the Enable bit in the Configuration Register */
2450 reg_data = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)); 2368 reg_data = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num));
2451 reg_data &= ~MV643XX_ETH_SERIAL_PORT_ENABLE; 2369 reg_data &= ~(MV643XX_ETH_SERIAL_PORT_ENABLE |
2370 MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL |
2371 MV643XX_ETH_FORCE_LINK_PASS);
2452 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), reg_data); 2372 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), reg_data);
2453} 2373}
2454 2374
2455 2375
2456static int eth_port_autoneg_supported(unsigned int eth_port_num)
2457{
2458 unsigned int phy_reg_data0;
2459
2460 eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data0);
2461
2462 return phy_reg_data0 & 0x1000;
2463}
2464
2465static int eth_port_link_is_up(unsigned int eth_port_num)
2466{
2467 unsigned int phy_reg_data1;
2468
2469 eth_port_read_smi_reg(eth_port_num, 1, &phy_reg_data1);
2470
2471 if (eth_port_autoneg_supported(eth_port_num)) {
2472 if (phy_reg_data1 & 0x20) /* auto-neg complete */
2473 return 1;
2474 } else if (phy_reg_data1 & 0x4) /* link up */
2475 return 1;
2476
2477 return 0;
2478}
2479
2480/* 2376/*
2481 * eth_port_read_smi_reg - Read PHY registers 2377 * eth_port_read_smi_reg - Read PHY registers
2482 * 2378 *
@@ -2582,250 +2478,21 @@ out:
2582} 2478}
2583 2479
2584/* 2480/*
2585 * eth_port_send - Send an Ethernet packet 2481 * Wrappers for MII support library.
2586 *
2587 * DESCRIPTION:
2588 * This routine send a given packet described by p_pktinfo parameter. It
2589 * supports transmitting of a packet spaned over multiple buffers. The
2590 * routine updates 'curr' and 'first' indexes according to the packet
2591 * segment passed to the routine. In case the packet segment is first,
2592 * the 'first' index is update. In any case, the 'curr' index is updated.
2593 * If the routine get into Tx resource error it assigns 'curr' index as
2594 * 'first'. This way the function can abort Tx process of multiple
2595 * descriptors per packet.
2596 *
2597 * INPUT:
2598 * struct mv643xx_private *mp Ethernet Port Control srtuct.
2599 * struct pkt_info *p_pkt_info User packet buffer.
2600 *
2601 * OUTPUT:
2602 * Tx ring 'curr' and 'first' indexes are updated.
2603 *
2604 * RETURN:
2605 * ETH_QUEUE_FULL in case of Tx resource error.
2606 * ETH_ERROR in case the routine can not access Tx desc ring.
2607 * ETH_QUEUE_LAST_RESOURCE if the routine uses the last Tx resource.
2608 * ETH_OK otherwise.
2609 *
2610 */
2611#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
2612/*
2613 * Modified to include the first descriptor pointer in case of SG
2614 */ 2482 */
2615static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp, 2483static int mv643xx_mdio_read(struct net_device *dev, int phy_id, int location)
2616 struct pkt_info *p_pkt_info)
2617{
2618 int tx_desc_curr, tx_desc_used, tx_first_desc, tx_next_desc;
2619 struct eth_tx_desc *current_descriptor;
2620 struct eth_tx_desc *first_descriptor;
2621 u32 command;
2622
2623 /* Do not process Tx ring in case of Tx ring resource error */
2624 if (mp->tx_resource_err)
2625 return ETH_QUEUE_FULL;
2626
2627 /*
2628 * The hardware requires that each buffer that is <= 8 bytes
2629 * in length must be aligned on an 8 byte boundary.
2630 */
2631 if (p_pkt_info->byte_cnt <= 8 && p_pkt_info->buf_ptr & 0x7) {
2632 printk(KERN_ERR
2633 "mv643xx_eth port %d: packet size <= 8 problem\n",
2634 mp->port_num);
2635 return ETH_ERROR;
2636 }
2637
2638 mp->tx_ring_skbs++;
2639 BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size);
2640
2641 /* Get the Tx Desc ring indexes */
2642 tx_desc_curr = mp->tx_curr_desc_q;
2643 tx_desc_used = mp->tx_used_desc_q;
2644
2645 current_descriptor = &mp->p_tx_desc_area[tx_desc_curr];
2646
2647 tx_next_desc = (tx_desc_curr + 1) % mp->tx_ring_size;
2648
2649 current_descriptor->buf_ptr = p_pkt_info->buf_ptr;
2650 current_descriptor->byte_cnt = p_pkt_info->byte_cnt;
2651 current_descriptor->l4i_chk = p_pkt_info->l4i_chk;
2652 mp->tx_skb[tx_desc_curr] = p_pkt_info->return_info;
2653
2654 command = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC |
2655 ETH_BUFFER_OWNED_BY_DMA;
2656 if (command & ETH_TX_FIRST_DESC) {
2657 tx_first_desc = tx_desc_curr;
2658 mp->tx_first_desc_q = tx_first_desc;
2659 first_descriptor = current_descriptor;
2660 mp->tx_first_command = command;
2661 } else {
2662 tx_first_desc = mp->tx_first_desc_q;
2663 first_descriptor = &mp->p_tx_desc_area[tx_first_desc];
2664 BUG_ON(first_descriptor == NULL);
2665 current_descriptor->cmd_sts = command;
2666 }
2667
2668 if (command & ETH_TX_LAST_DESC) {
2669 wmb();
2670 first_descriptor->cmd_sts = mp->tx_first_command;
2671
2672 wmb();
2673 ETH_ENABLE_TX_QUEUE(mp->port_num);
2674
2675 /*
2676 * Finish Tx packet. Update first desc in case of Tx resource
2677 * error */
2678 tx_first_desc = tx_next_desc;
2679 mp->tx_first_desc_q = tx_first_desc;
2680 }
2681
2682 /* Check for ring index overlap in the Tx desc ring */
2683 if (tx_next_desc == tx_desc_used) {
2684 mp->tx_resource_err = 1;
2685 mp->tx_curr_desc_q = tx_first_desc;
2686
2687 return ETH_QUEUE_LAST_RESOURCE;
2688 }
2689
2690 mp->tx_curr_desc_q = tx_next_desc;
2691
2692 return ETH_OK;
2693}
2694#else
2695static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
2696 struct pkt_info *p_pkt_info)
2697{ 2484{
2698 int tx_desc_curr; 2485 int val;
2699 int tx_desc_used; 2486 struct mv643xx_private *mp = netdev_priv(dev);
2700 struct eth_tx_desc *current_descriptor;
2701 unsigned int command_status;
2702
2703 /* Do not process Tx ring in case of Tx ring resource error */
2704 if (mp->tx_resource_err)
2705 return ETH_QUEUE_FULL;
2706
2707 mp->tx_ring_skbs++;
2708 BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size);
2709
2710 /* Get the Tx Desc ring indexes */
2711 tx_desc_curr = mp->tx_curr_desc_q;
2712 tx_desc_used = mp->tx_used_desc_q;
2713 current_descriptor = &mp->p_tx_desc_area[tx_desc_curr];
2714
2715 command_status = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC;
2716 current_descriptor->buf_ptr = p_pkt_info->buf_ptr;
2717 current_descriptor->byte_cnt = p_pkt_info->byte_cnt;
2718 mp->tx_skb[tx_desc_curr] = p_pkt_info->return_info;
2719
2720 /* Set last desc with DMA ownership and interrupt enable. */
2721 wmb();
2722 current_descriptor->cmd_sts = command_status |
2723 ETH_BUFFER_OWNED_BY_DMA | ETH_TX_ENABLE_INTERRUPT;
2724
2725 wmb();
2726 ETH_ENABLE_TX_QUEUE(mp->port_num);
2727
2728 /* Finish Tx packet. Update first desc in case of Tx resource error */
2729 tx_desc_curr = (tx_desc_curr + 1) % mp->tx_ring_size;
2730
2731 /* Update the current descriptor */
2732 mp->tx_curr_desc_q = tx_desc_curr;
2733
2734 /* Check for ring index overlap in the Tx desc ring */
2735 if (tx_desc_curr == tx_desc_used) {
2736 mp->tx_resource_err = 1;
2737 return ETH_QUEUE_LAST_RESOURCE;
2738 }
2739 2487
2740 return ETH_OK; 2488 eth_port_read_smi_reg(mp->port_num, location, &val);
2489 return val;
2741} 2490}
2742#endif
2743 2491
2744/* 2492static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location, int val)
2745 * eth_tx_return_desc - Free all used Tx descriptors
2746 *
2747 * DESCRIPTION:
2748 * This routine returns the transmitted packet information to the caller.
2749 * It uses the 'first' index to support Tx desc return in case a transmit
2750 * of a packet spanned over multiple buffer still in process.
2751 * In case the Tx queue was in "resource error" condition, where there are
2752 * no available Tx resources, the function resets the resource error flag.
2753 *
2754 * INPUT:
2755 * struct mv643xx_private *mp Ethernet Port Control srtuct.
2756 * struct pkt_info *p_pkt_info User packet buffer.
2757 *
2758 * OUTPUT:
2759 * Tx ring 'first' and 'used' indexes are updated.
2760 *
2761 * RETURN:
2762 * ETH_OK on success
2763 * ETH_ERROR otherwise.
2764 *
2765 */
2766static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
2767 struct pkt_info *p_pkt_info)
2768{ 2493{
2769 int tx_desc_used; 2494 struct mv643xx_private *mp = netdev_priv(dev);
2770 int tx_busy_desc; 2495 eth_port_write_smi_reg(mp->port_num, location, val);
2771 struct eth_tx_desc *p_tx_desc_used;
2772 unsigned int command_status;
2773 unsigned long flags;
2774 int err = ETH_OK;
2775
2776 spin_lock_irqsave(&mp->lock, flags);
2777
2778#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
2779 tx_busy_desc = mp->tx_first_desc_q;
2780#else
2781 tx_busy_desc = mp->tx_curr_desc_q;
2782#endif
2783
2784 /* Get the Tx Desc ring indexes */
2785 tx_desc_used = mp->tx_used_desc_q;
2786
2787 p_tx_desc_used = &mp->p_tx_desc_area[tx_desc_used];
2788
2789 /* Sanity check */
2790 if (p_tx_desc_used == NULL) {
2791 err = ETH_ERROR;
2792 goto out;
2793 }
2794
2795 /* Stop release. About to overlap the current available Tx descriptor */
2796 if (tx_desc_used == tx_busy_desc && !mp->tx_resource_err) {
2797 err = ETH_ERROR;
2798 goto out;
2799 }
2800
2801 command_status = p_tx_desc_used->cmd_sts;
2802
2803 /* Still transmitting... */
2804 if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
2805 err = ETH_ERROR;
2806 goto out;
2807 }
2808
2809 /* Pass the packet information to the caller */
2810 p_pkt_info->cmd_sts = command_status;
2811 p_pkt_info->return_info = mp->tx_skb[tx_desc_used];
2812 p_pkt_info->buf_ptr = p_tx_desc_used->buf_ptr;
2813 p_pkt_info->byte_cnt = p_tx_desc_used->byte_cnt;
2814 mp->tx_skb[tx_desc_used] = NULL;
2815
2816 /* Update the next descriptor to release. */
2817 mp->tx_used_desc_q = (tx_desc_used + 1) % mp->tx_ring_size;
2818
2819 /* Any Tx return cancels the Tx resource error status */
2820 mp->tx_resource_err = 0;
2821
2822 BUG_ON(mp->tx_ring_skbs == 0);
2823 mp->tx_ring_skbs--;
2824
2825out:
2826 spin_unlock_irqrestore(&mp->lock, flags);
2827
2828 return err;
2829} 2496}
2830 2497
2831/* 2498/*
@@ -3017,111 +2684,6 @@ static const struct mv643xx_stats mv643xx_gstrings_stats[] = {
3017#define MV643XX_STATS_LEN \ 2684#define MV643XX_STATS_LEN \
3018 sizeof(mv643xx_gstrings_stats) / sizeof(struct mv643xx_stats) 2685 sizeof(mv643xx_gstrings_stats) / sizeof(struct mv643xx_stats)
3019 2686
3020static int
3021mv643xx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
3022{
3023 struct mv643xx_private *mp = netdev->priv;
3024 int port_num = mp->port_num;
3025 int autoneg = eth_port_autoneg_supported(port_num);
3026 int mode_10_bit;
3027 int auto_duplex;
3028 int half_duplex = 0;
3029 int full_duplex = 0;
3030 int auto_speed;
3031 int speed_10 = 0;
3032 int speed_100 = 0;
3033 int speed_1000 = 0;
3034
3035 u32 pcs = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num));
3036 u32 psr = mv_read(MV643XX_ETH_PORT_STATUS_REG(port_num));
3037
3038 mode_10_bit = psr & MV643XX_ETH_PORT_STATUS_MODE_10_BIT;
3039
3040 if (mode_10_bit) {
3041 ecmd->supported = SUPPORTED_10baseT_Half;
3042 } else {
3043 ecmd->supported = (SUPPORTED_10baseT_Half |
3044 SUPPORTED_10baseT_Full |
3045 SUPPORTED_100baseT_Half |
3046 SUPPORTED_100baseT_Full |
3047 SUPPORTED_1000baseT_Full |
3048 (autoneg ? SUPPORTED_Autoneg : 0) |
3049 SUPPORTED_TP);
3050
3051 auto_duplex = !(pcs & MV643XX_ETH_DISABLE_AUTO_NEG_FOR_DUPLX);
3052 auto_speed = !(pcs & MV643XX_ETH_DISABLE_AUTO_NEG_SPEED_GMII);
3053
3054 ecmd->advertising = ADVERTISED_TP;
3055
3056 if (autoneg) {
3057 ecmd->advertising |= ADVERTISED_Autoneg;
3058
3059 if (auto_duplex) {
3060 half_duplex = 1;
3061 full_duplex = 1;
3062 } else {
3063 if (pcs & MV643XX_ETH_SET_FULL_DUPLEX_MODE)
3064 full_duplex = 1;
3065 else
3066 half_duplex = 1;
3067 }
3068
3069 if (auto_speed) {
3070 speed_10 = 1;
3071 speed_100 = 1;
3072 speed_1000 = 1;
3073 } else {
3074 if (pcs & MV643XX_ETH_SET_GMII_SPEED_TO_1000)
3075 speed_1000 = 1;
3076 else if (pcs & MV643XX_ETH_SET_MII_SPEED_TO_100)
3077 speed_100 = 1;
3078 else
3079 speed_10 = 1;
3080 }
3081
3082 if (speed_10 & half_duplex)
3083 ecmd->advertising |= ADVERTISED_10baseT_Half;
3084 if (speed_10 & full_duplex)
3085 ecmd->advertising |= ADVERTISED_10baseT_Full;
3086 if (speed_100 & half_duplex)
3087 ecmd->advertising |= ADVERTISED_100baseT_Half;
3088 if (speed_100 & full_duplex)
3089 ecmd->advertising |= ADVERTISED_100baseT_Full;
3090 if (speed_1000)
3091 ecmd->advertising |= ADVERTISED_1000baseT_Full;
3092 }
3093 }
3094
3095 ecmd->port = PORT_TP;
3096 ecmd->phy_address = ethernet_phy_get(port_num);
3097
3098 ecmd->transceiver = XCVR_EXTERNAL;
3099
3100 if (netif_carrier_ok(netdev)) {
3101 if (mode_10_bit)
3102 ecmd->speed = SPEED_10;
3103 else {
3104 if (psr & MV643XX_ETH_PORT_STATUS_GMII_1000)
3105 ecmd->speed = SPEED_1000;
3106 else if (psr & MV643XX_ETH_PORT_STATUS_MII_100)
3107 ecmd->speed = SPEED_100;
3108 else
3109 ecmd->speed = SPEED_10;
3110 }
3111
3112 if (psr & MV643XX_ETH_PORT_STATUS_FULL_DUPLEX)
3113 ecmd->duplex = DUPLEX_FULL;
3114 else
3115 ecmd->duplex = DUPLEX_HALF;
3116 } else {
3117 ecmd->speed = -1;
3118 ecmd->duplex = -1;
3119 }
3120
3121 ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
3122 return 0;
3123}
3124
3125static void mv643xx_get_drvinfo(struct net_device *netdev, 2687static void mv643xx_get_drvinfo(struct net_device *netdev,
3126 struct ethtool_drvinfo *drvinfo) 2688 struct ethtool_drvinfo *drvinfo)
3127{ 2689{
@@ -3168,15 +2730,41 @@ static void mv643xx_get_strings(struct net_device *netdev, uint32_t stringset,
3168 } 2730 }
3169} 2731}
3170 2732
2733static u32 mv643xx_eth_get_link(struct net_device *dev)
2734{
2735 struct mv643xx_private *mp = netdev_priv(dev);
2736
2737 return mii_link_ok(&mp->mii);
2738}
2739
2740static int mv643xx_eth_nway_restart(struct net_device *dev)
2741{
2742 struct mv643xx_private *mp = netdev_priv(dev);
2743
2744 return mii_nway_restart(&mp->mii);
2745}
2746
2747static int mv643xx_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2748{
2749 struct mv643xx_private *mp = netdev_priv(dev);
2750
2751 return generic_mii_ioctl(&mp->mii, if_mii(ifr), cmd, NULL);
2752}
2753
3171static struct ethtool_ops mv643xx_ethtool_ops = { 2754static struct ethtool_ops mv643xx_ethtool_ops = {
3172 .get_settings = mv643xx_get_settings, 2755 .get_settings = mv643xx_get_settings,
2756 .set_settings = mv643xx_set_settings,
3173 .get_drvinfo = mv643xx_get_drvinfo, 2757 .get_drvinfo = mv643xx_get_drvinfo,
3174 .get_link = ethtool_op_get_link, 2758 .get_link = mv643xx_eth_get_link,
3175 .get_sg = ethtool_op_get_sg, 2759 .get_sg = ethtool_op_get_sg,
3176 .set_sg = ethtool_op_set_sg, 2760 .set_sg = ethtool_op_set_sg,
3177 .get_strings = mv643xx_get_strings, 2761 .get_strings = mv643xx_get_strings,
3178 .get_stats_count = mv643xx_get_stats_count, 2762 .get_stats_count = mv643xx_get_stats_count,
3179 .get_ethtool_stats = mv643xx_get_ethtool_stats, 2763 .get_ethtool_stats = mv643xx_get_ethtool_stats,
2764 .get_strings = mv643xx_get_strings,
2765 .get_stats_count = mv643xx_get_stats_count,
2766 .get_ethtool_stats = mv643xx_get_ethtool_stats,
2767 .nway_reset = mv643xx_eth_nway_restart,
3180}; 2768};
3181 2769
3182/************* End ethtool support *************************/ 2770/************* End ethtool support *************************/
diff --git a/drivers/net/mv643xx_eth.h b/drivers/net/mv643xx_eth.h
index f769f9b626ea..7754d1974b9e 100644
--- a/drivers/net/mv643xx_eth.h
+++ b/drivers/net/mv643xx_eth.h
@@ -5,53 +5,16 @@
5#include <linux/kernel.h> 5#include <linux/kernel.h>
6#include <linux/spinlock.h> 6#include <linux/spinlock.h>
7#include <linux/workqueue.h> 7#include <linux/workqueue.h>
8#include <linux/mii.h>
8 9
9#include <linux/mv643xx.h> 10#include <linux/mv643xx.h>
10 11
11#define BIT0 0x00000001
12#define BIT1 0x00000002
13#define BIT2 0x00000004
14#define BIT3 0x00000008
15#define BIT4 0x00000010
16#define BIT5 0x00000020
17#define BIT6 0x00000040
18#define BIT7 0x00000080
19#define BIT8 0x00000100
20#define BIT9 0x00000200
21#define BIT10 0x00000400
22#define BIT11 0x00000800
23#define BIT12 0x00001000
24#define BIT13 0x00002000
25#define BIT14 0x00004000
26#define BIT15 0x00008000
27#define BIT16 0x00010000
28#define BIT17 0x00020000
29#define BIT18 0x00040000
30#define BIT19 0x00080000
31#define BIT20 0x00100000
32#define BIT21 0x00200000
33#define BIT22 0x00400000
34#define BIT23 0x00800000
35#define BIT24 0x01000000
36#define BIT25 0x02000000
37#define BIT26 0x04000000
38#define BIT27 0x08000000
39#define BIT28 0x10000000
40#define BIT29 0x20000000
41#define BIT30 0x40000000
42#define BIT31 0x80000000
43
44/*
45 * The first part is the high level driver of the gigE ethernet ports.
46 */
47
48/* Checksum offload for Tx works for most packets, but 12/* Checksum offload for Tx works for most packets, but
49 * fails if previous packet sent did not use hw csum 13 * fails if previous packet sent did not use hw csum
50 */ 14 */
51#define MV643XX_CHECKSUM_OFFLOAD_TX 15#define MV643XX_CHECKSUM_OFFLOAD_TX
52#define MV643XX_NAPI 16#define MV643XX_NAPI
53#define MV643XX_TX_FAST_REFILL 17#define MV643XX_TX_FAST_REFILL
54#undef MV643XX_RX_QUEUE_FILL_ON_TASK /* Does not work, yet */
55#undef MV643XX_COAL 18#undef MV643XX_COAL
56 19
57/* 20/*
@@ -73,25 +36,40 @@
73#define MV643XX_RX_COAL 100 36#define MV643XX_RX_COAL 100
74#endif 37#endif
75 38
76/* 39#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
77 * The second part is the low level driver of the gigE ethernet ports. 40#define MAX_DESCS_PER_SKB (MAX_SKB_FRAGS + 1)
78 */ 41#else
42#define MAX_DESCS_PER_SKB 1
43#endif
79 44
80/* 45#define ETH_VLAN_HLEN 4
81 * Header File for : MV-643xx network interface header 46#define ETH_FCS_LEN 4
82 * 47#define ETH_DMA_ALIGN 8 /* hw requires 8-byte alignment */
83 * DESCRIPTION: 48#define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */
84 * This header file contains macros typedefs and function declaration for 49#define ETH_WRAPPER_LEN (ETH_HW_IP_ALIGN + ETH_HLEN + \
85 * the Marvell Gig Bit Ethernet Controller. 50 ETH_VLAN_HLEN + ETH_FCS_LEN)
86 * 51#define ETH_RX_SKB_SIZE ((dev->mtu + ETH_WRAPPER_LEN + 7) & ~0x7)
87 * DEPENDENCIES: 52
88 * None. 53#define ETH_RX_QUEUES_ENABLED (1 << 0) /* use only Q0 for receive */
89 * 54#define ETH_TX_QUEUES_ENABLED (1 << 0) /* use only Q0 for transmit */
90 */ 55
56#define ETH_INT_CAUSE_RX_DONE (ETH_RX_QUEUES_ENABLED << 2)
57#define ETH_INT_CAUSE_RX_ERROR (ETH_RX_QUEUES_ENABLED << 9)
58#define ETH_INT_CAUSE_RX (ETH_INT_CAUSE_RX_DONE | ETH_INT_CAUSE_RX_ERROR)
59#define ETH_INT_CAUSE_EXT 0x00000002
60#define ETH_INT_UNMASK_ALL (ETH_INT_CAUSE_RX | ETH_INT_CAUSE_EXT)
91 61
92/* MAC accepet/reject macros */ 62#define ETH_INT_CAUSE_TX_DONE (ETH_TX_QUEUES_ENABLED << 0)
93#define ACCEPT_MAC_ADDR 0 63#define ETH_INT_CAUSE_TX_ERROR (ETH_TX_QUEUES_ENABLED << 8)
94#define REJECT_MAC_ADDR 1 64#define ETH_INT_CAUSE_TX (ETH_INT_CAUSE_TX_DONE | ETH_INT_CAUSE_TX_ERROR)
65#define ETH_INT_CAUSE_PHY 0x00010000
66#define ETH_INT_UNMASK_ALL_EXT (ETH_INT_CAUSE_TX | ETH_INT_CAUSE_PHY)
67
68#define ETH_INT_MASK_ALL 0x00000000
69#define ETH_INT_MASK_ALL_EXT 0x00000000
70
71#define PHY_WAIT_ITERATIONS 1000 /* 1000 iterations * 10uS = 10mS max */
72#define PHY_WAIT_MICRO_SECONDS 10
95 73
96/* Buffer offset from buffer pointer */ 74/* Buffer offset from buffer pointer */
97#define RX_BUF_OFFSET 0x2 75#define RX_BUF_OFFSET 0x2
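
The reworked header derives the interrupt cause masks from the enabled-queue bitmaps instead of hand-written BITn constants, and rounds the MTU-derived receive buffer length up to the 8-byte DMA alignment. That arithmetic can be checked in isolation with the small program below; the MTU value is only an example, and ETH_HLEN (14, the standard Ethernet header length) comes from the kernel's headers rather than this patch.

    #include <stdio.h>

    #define ETH_RX_QUEUES_ENABLED   (1 << 0)
    #define ETH_INT_CAUSE_RX_DONE   (ETH_RX_QUEUES_ENABLED << 2)
    #define ETH_INT_CAUSE_RX_ERROR  (ETH_RX_QUEUES_ENABLED << 9)
    #define ETH_INT_CAUSE_RX        (ETH_INT_CAUSE_RX_DONE | ETH_INT_CAUSE_RX_ERROR)
    #define ETH_INT_CAUSE_EXT       0x00000002
    #define ETH_INT_UNMASK_ALL      (ETH_INT_CAUSE_RX | ETH_INT_CAUSE_EXT)

    #define ETH_HLEN         14      /* standard Ethernet header */
    #define ETH_VLAN_HLEN    4
    #define ETH_FCS_LEN      4
    #define ETH_HW_IP_ALIGN  2
    #define ETH_WRAPPER_LEN  (ETH_HW_IP_ALIGN + ETH_HLEN + ETH_VLAN_HLEN + ETH_FCS_LEN)
    #define RX_SKB_SIZE(mtu) (((mtu) + ETH_WRAPPER_LEN + 7) & ~0x7)

    int main(void)
    {
        printf("ETH_INT_UNMASK_ALL = 0x%08x\n", ETH_INT_UNMASK_ALL);
        printf("rx skb size for mtu 1500 = %d\n", RX_SKB_SIZE(1500));
        return 0;
    }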
@@ -133,88 +111,71 @@
133#define ETH_MIB_LATE_COLLISION 0x7c 111#define ETH_MIB_LATE_COLLISION 0x7c
134 112
135/* Port serial status reg (PSR) */ 113/* Port serial status reg (PSR) */
136#define ETH_INTERFACE_GMII_MII 0 114#define ETH_INTERFACE_PCM 0x00000001
137#define ETH_INTERFACE_PCM BIT0 115#define ETH_LINK_IS_UP 0x00000002
138#define ETH_LINK_IS_DOWN 0 116#define ETH_PORT_AT_FULL_DUPLEX 0x00000004
139#define ETH_LINK_IS_UP BIT1 117#define ETH_RX_FLOW_CTRL_ENABLED 0x00000008
140#define ETH_PORT_AT_HALF_DUPLEX 0 118#define ETH_GMII_SPEED_1000 0x00000010
141#define ETH_PORT_AT_FULL_DUPLEX BIT2 119#define ETH_MII_SPEED_100 0x00000020
142#define ETH_RX_FLOW_CTRL_DISABLED 0 120#define ETH_TX_IN_PROGRESS 0x00000080
143#define ETH_RX_FLOW_CTRL_ENBALED BIT3 121#define ETH_BYPASS_ACTIVE 0x00000100
144#define ETH_GMII_SPEED_100_10 0 122#define ETH_PORT_AT_PARTITION_STATE 0x00000200
145#define ETH_GMII_SPEED_1000 BIT4 123#define ETH_PORT_TX_FIFO_EMPTY 0x00000400
146#define ETH_MII_SPEED_10 0
147#define ETH_MII_SPEED_100 BIT5
148#define ETH_NO_TX 0
149#define ETH_TX_IN_PROGRESS BIT7
150#define ETH_BYPASS_NO_ACTIVE 0
151#define ETH_BYPASS_ACTIVE BIT8
152#define ETH_PORT_NOT_AT_PARTITION_STATE 0
153#define ETH_PORT_AT_PARTITION_STATE BIT9
154#define ETH_PORT_TX_FIFO_NOT_EMPTY 0
155#define ETH_PORT_TX_FIFO_EMPTY BIT10
156
157#define ETH_DEFAULT_RX_BPDU_QUEUE_3 (BIT23 | BIT22)
158#define ETH_DEFAULT_RX_BPDU_QUEUE_4 BIT24
159#define ETH_DEFAULT_RX_BPDU_QUEUE_5 (BIT24 | BIT22)
160#define ETH_DEFAULT_RX_BPDU_QUEUE_6 (BIT24 | BIT23)
161#define ETH_DEFAULT_RX_BPDU_QUEUE_7 (BIT24 | BIT23 | BIT22)
162 124
163/* SMI reg */ 125/* SMI reg */
164#define ETH_SMI_BUSY BIT28 /* 0 - Write, 1 - Read */ 126#define ETH_SMI_BUSY 0x10000000 /* 0 - Write, 1 - Read */
165#define ETH_SMI_READ_VALID BIT27 /* 0 - Write, 1 - Read */ 127#define ETH_SMI_READ_VALID 0x08000000 /* 0 - Write, 1 - Read */
166#define ETH_SMI_OPCODE_WRITE 0 /* Completion of Read operation */ 128#define ETH_SMI_OPCODE_WRITE 0 /* Completion of Read */
167#define ETH_SMI_OPCODE_READ BIT26 /* Operation is in progress */ 129#define ETH_SMI_OPCODE_READ 0x04000000 /* Operation is in progress */
130
131/* Interrupt Cause Register Bit Definitions */
168 132
169/* SDMA command status fields macros */ 133/* SDMA command status fields macros */
170 134
171/* Tx & Rx descriptors status */ 135/* Tx & Rx descriptors status */
172#define ETH_ERROR_SUMMARY (BIT0) 136#define ETH_ERROR_SUMMARY 0x00000001
173 137
174/* Tx & Rx descriptors command */ 138/* Tx & Rx descriptors command */
175#define ETH_BUFFER_OWNED_BY_DMA (BIT31) 139#define ETH_BUFFER_OWNED_BY_DMA 0x80000000
176 140
177/* Tx descriptors status */ 141/* Tx descriptors status */
178#define ETH_LC_ERROR (0 ) 142#define ETH_LC_ERROR 0
179#define ETH_UR_ERROR (BIT1 ) 143#define ETH_UR_ERROR 0x00000002
180#define ETH_RL_ERROR (BIT2 ) 144#define ETH_RL_ERROR 0x00000004
181#define ETH_LLC_SNAP_FORMAT (BIT9 ) 145#define ETH_LLC_SNAP_FORMAT 0x00000200
182 146
183/* Rx descriptors status */ 147/* Rx descriptors status */
184#define ETH_CRC_ERROR (0 ) 148#define ETH_OVERRUN_ERROR 0x00000002
185#define ETH_OVERRUN_ERROR (BIT1 ) 149#define ETH_MAX_FRAME_LENGTH_ERROR 0x00000004
186#define ETH_MAX_FRAME_LENGTH_ERROR (BIT2 ) 150#define ETH_RESOURCE_ERROR 0x00000006
187#define ETH_RESOURCE_ERROR ((BIT2 | BIT1)) 151#define ETH_VLAN_TAGGED 0x00080000
188#define ETH_VLAN_TAGGED (BIT19) 152#define ETH_BPDU_FRAME 0x00100000
189#define ETH_BPDU_FRAME (BIT20) 153#define ETH_UDP_FRAME_OVER_IP_V_4 0x00200000
190#define ETH_TCP_FRAME_OVER_IP_V_4 (0 ) 154#define ETH_OTHER_FRAME_TYPE 0x00400000
191#define ETH_UDP_FRAME_OVER_IP_V_4 (BIT21) 155#define ETH_LAYER_2_IS_ETH_V_2 0x00800000
192#define ETH_OTHER_FRAME_TYPE (BIT22) 156#define ETH_FRAME_TYPE_IP_V_4 0x01000000
193#define ETH_LAYER_2_IS_ETH_V_2 (BIT23) 157#define ETH_FRAME_HEADER_OK 0x02000000
194#define ETH_FRAME_TYPE_IP_V_4 (BIT24) 158#define ETH_RX_LAST_DESC 0x04000000
195#define ETH_FRAME_HEADER_OK (BIT25) 159#define ETH_RX_FIRST_DESC 0x08000000
196#define ETH_RX_LAST_DESC (BIT26) 160#define ETH_UNKNOWN_DESTINATION_ADDR 0x10000000
197#define ETH_RX_FIRST_DESC (BIT27) 161#define ETH_RX_ENABLE_INTERRUPT 0x20000000
198#define ETH_UNKNOWN_DESTINATION_ADDR (BIT28) 162#define ETH_LAYER_4_CHECKSUM_OK 0x40000000
199#define ETH_RX_ENABLE_INTERRUPT (BIT29)
200#define ETH_LAYER_4_CHECKSUM_OK (BIT30)
201 163
202/* Rx descriptors byte count */ 164/* Rx descriptors byte count */
203#define ETH_FRAME_FRAGMENTED (BIT2) 165#define ETH_FRAME_FRAGMENTED 0x00000004
204 166
205/* Tx descriptors command */ 167/* Tx descriptors command */
206#define ETH_LAYER_4_CHECKSUM_FIRST_DESC (BIT10) 168#define ETH_LAYER_4_CHECKSUM_FIRST_DESC 0x00000400
207#define ETH_FRAME_SET_TO_VLAN (BIT15) 169#define ETH_FRAME_SET_TO_VLAN 0x00008000
208#define ETH_TCP_FRAME (0 ) 170#define ETH_UDP_FRAME 0x00010000
209#define ETH_UDP_FRAME (BIT16) 171#define ETH_GEN_TCP_UDP_CHECKSUM 0x00020000
210#define ETH_GEN_TCP_UDP_CHECKSUM (BIT17) 172#define ETH_GEN_IP_V_4_CHECKSUM 0x00040000
211#define ETH_GEN_IP_V_4_CHECKSUM (BIT18) 173#define ETH_ZERO_PADDING 0x00080000
212#define ETH_ZERO_PADDING (BIT19) 174#define ETH_TX_LAST_DESC 0x00100000
213#define ETH_TX_LAST_DESC (BIT20) 175#define ETH_TX_FIRST_DESC 0x00200000
214#define ETH_TX_FIRST_DESC (BIT21) 176#define ETH_GEN_CRC 0x00400000
215#define ETH_GEN_CRC (BIT22) 177#define ETH_TX_ENABLE_INTERRUPT 0x00800000
216#define ETH_TX_ENABLE_INTERRUPT (BIT23) 178#define ETH_AUTO_MODE 0x40000000
217#define ETH_AUTO_MODE (BIT30)
218 179
219#define ETH_TX_IHL_SHIFT 11 180#define ETH_TX_IHL_SHIFT 11
220 181
@@ -324,13 +285,6 @@ struct mv643xx_mib_counters {
324 285
325struct mv643xx_private { 286struct mv643xx_private {
326 int port_num; /* User Ethernet port number */ 287 int port_num; /* User Ethernet port number */
327 u8 port_mac_addr[6]; /* User defined port MAC address.*/
328 u32 port_config; /* User port configuration value*/
329 u32 port_config_extend; /* User port config extend value*/
330 u32 port_sdma_config; /* User port SDMA config value */
331 u32 port_serial_control; /* User port serial control value */
332 u32 port_tx_queue_command; /* Port active Tx queues summary*/
333 u32 port_rx_queue_command; /* Port active Rx queues summary*/
334 288
335 u32 rx_sram_addr; /* Base address of rx sram area */ 289 u32 rx_sram_addr; /* Base address of rx sram area */
336 u32 rx_sram_size; /* Size of rx sram area */ 290 u32 rx_sram_size; /* Size of rx sram area */
@@ -338,7 +292,6 @@ struct mv643xx_private {
338 u32 tx_sram_size; /* Size of tx sram area */ 292 u32 tx_sram_size; /* Size of tx sram area */
339 293
340 int rx_resource_err; /* Rx ring resource error flag */ 294 int rx_resource_err; /* Rx ring resource error flag */
341 int tx_resource_err; /* Tx ring resource error flag */
342 295
343 /* Tx/Rx rings managment indexes fields. For driver use */ 296 /* Tx/Rx rings managment indexes fields. For driver use */
344 297
@@ -347,10 +300,6 @@ struct mv643xx_private {
347 300
348 /* Next available and first returning Tx resource */ 301 /* Next available and first returning Tx resource */
349 int tx_curr_desc_q, tx_used_desc_q; 302 int tx_curr_desc_q, tx_used_desc_q;
350#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
351 int tx_first_desc_q;
352 u32 tx_first_command;
353#endif
354 303
355#ifdef MV643XX_TX_FAST_REFILL 304#ifdef MV643XX_TX_FAST_REFILL
356 u32 tx_clean_threshold; 305 u32 tx_clean_threshold;
@@ -358,54 +307,43 @@ struct mv643xx_private {
358 307
359 struct eth_rx_desc *p_rx_desc_area; 308 struct eth_rx_desc *p_rx_desc_area;
360 dma_addr_t rx_desc_dma; 309 dma_addr_t rx_desc_dma;
361 unsigned int rx_desc_area_size; 310 int rx_desc_area_size;
362 struct sk_buff **rx_skb; 311 struct sk_buff **rx_skb;
363 312
364 struct eth_tx_desc *p_tx_desc_area; 313 struct eth_tx_desc *p_tx_desc_area;
365 dma_addr_t tx_desc_dma; 314 dma_addr_t tx_desc_dma;
366 unsigned int tx_desc_area_size; 315 int tx_desc_area_size;
367 struct sk_buff **tx_skb; 316 struct sk_buff **tx_skb;
368 317
369 struct work_struct tx_timeout_task; 318 struct work_struct tx_timeout_task;
370 319
371 /*
372 * Former struct mv643xx_eth_priv members start here
373 */
374 struct net_device_stats stats; 320 struct net_device_stats stats;
375 struct mv643xx_mib_counters mib_counters; 321 struct mv643xx_mib_counters mib_counters;
376 spinlock_t lock; 322 spinlock_t lock;
377 /* Size of Tx Ring per queue */ 323 /* Size of Tx Ring per queue */
378 unsigned int tx_ring_size; 324 int tx_ring_size;
379 /* Ammont of SKBs outstanding on Tx queue */ 325 /* Number of tx descriptors in use */
380 unsigned int tx_ring_skbs; 326 int tx_desc_count;
381 /* Size of Rx Ring per queue */ 327 /* Size of Rx Ring per queue */
382 unsigned int rx_ring_size; 328 int rx_ring_size;
383 /* Ammount of SKBs allocated to Rx Ring per queue */ 329 /* Number of rx descriptors in use */
384 unsigned int rx_ring_skbs; 330 int rx_desc_count;
385
386 /*
387 * rx_task used to fill RX ring out of bottom half context
388 */
389 struct work_struct rx_task;
390 331
391 /* 332 /*
392 * Used in case RX Ring is empty, which can be caused when 333 * Used in case RX Ring is empty, which can be caused when
393 * system does not have resources (skb's) 334 * system does not have resources (skb's)
394 */ 335 */
395 struct timer_list timeout; 336 struct timer_list timeout;
396 long rx_task_busy __attribute__ ((aligned(SMP_CACHE_BYTES)));
397 unsigned rx_timer_flag;
398 337
399 u32 rx_int_coal; 338 u32 rx_int_coal;
400 u32 tx_int_coal; 339 u32 tx_int_coal;
340 struct mii_if_info mii;
401}; 341};
402 342
403/* ethernet.h API list */
404
405/* Port operation control routines */ 343/* Port operation control routines */
406static void eth_port_init(struct mv643xx_private *mp); 344static void eth_port_init(struct mv643xx_private *mp);
407static void eth_port_reset(unsigned int eth_port_num); 345static void eth_port_reset(unsigned int eth_port_num);
408static void eth_port_start(struct mv643xx_private *mp); 346static void eth_port_start(struct net_device *dev);
409 347
410/* Port MAC address routines */ 348/* Port MAC address routines */
411static void eth_port_uc_addr_set(unsigned int eth_port_num, 349static void eth_port_uc_addr_set(unsigned int eth_port_num,
@@ -423,10 +361,6 @@ static void eth_port_read_smi_reg(unsigned int eth_port_num,
423static void eth_clear_mib_counters(unsigned int eth_port_num); 361static void eth_clear_mib_counters(unsigned int eth_port_num);
424 362
425/* Port data flow control routines */ 363/* Port data flow control routines */
426static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
427 struct pkt_info *p_pkt_info);
428static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
429 struct pkt_info *p_pkt_info);
430static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp, 364static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
431 struct pkt_info *p_pkt_info); 365 struct pkt_info *p_pkt_info);
432static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp, 366static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 9d6d2548c2d3..8d4999837b65 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -3,6 +3,7 @@
3 Written/copyright 1999-2001 by Donald Becker. 3 Written/copyright 1999-2001 by Donald Becker.
4 Portions copyright (c) 2001,2002 Sun Microsystems (thockin@sun.com) 4 Portions copyright (c) 2001,2002 Sun Microsystems (thockin@sun.com)
5 Portions copyright 2001,2002 Manfred Spraul (manfred@colorfullife.com) 5 Portions copyright 2001,2002 Manfred Spraul (manfred@colorfullife.com)
6 Portions copyright 2004 Harald Welte <laforge@gnumonks.org>
6 7
7 This software may be used and distributed according to the terms of 8 This software may be used and distributed according to the terms of
8 the GNU General Public License (GPL), incorporated herein by reference. 9 the GNU General Public License (GPL), incorporated herein by reference.
@@ -135,8 +136,6 @@
135 136
136 TODO: 137 TODO:
137 * big endian support with CFG:BEM instead of cpu_to_le32 138 * big endian support with CFG:BEM instead of cpu_to_le32
138 * support for an external PHY
139 * NAPI
140*/ 139*/
141 140
142#include <linux/config.h> 141#include <linux/config.h>
@@ -160,6 +159,7 @@
160#include <linux/mii.h> 159#include <linux/mii.h>
161#include <linux/crc32.h> 160#include <linux/crc32.h>
162#include <linux/bitops.h> 161#include <linux/bitops.h>
162#include <linux/prefetch.h>
163#include <asm/processor.h> /* Processor type for cache alignment. */ 163#include <asm/processor.h> /* Processor type for cache alignment. */
164#include <asm/io.h> 164#include <asm/io.h>
165#include <asm/irq.h> 165#include <asm/irq.h>
@@ -183,13 +183,11 @@
183 NETIF_MSG_TX_ERR) 183 NETIF_MSG_TX_ERR)
184static int debug = -1; 184static int debug = -1;
185 185
186/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
187static int max_interrupt_work = 20;
188static int mtu; 186static int mtu;
189 187
190/* Maximum number of multicast addresses to filter (vs. rx-all-multicast). 188/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
191 This chip uses a 512 element hash table based on the Ethernet CRC. */ 189 This chip uses a 512 element hash table based on the Ethernet CRC. */
192static int multicast_filter_limit = 100; 190static const int multicast_filter_limit = 100;
193 191
194/* Set the copy breakpoint for the copy-only-tiny-frames scheme. 192/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
195 Setting to > 1518 effectively disables this feature. */ 193 Setting to > 1518 effectively disables this feature. */
@@ -251,14 +249,11 @@ MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
251MODULE_DESCRIPTION("National Semiconductor DP8381x series PCI Ethernet driver"); 249MODULE_DESCRIPTION("National Semiconductor DP8381x series PCI Ethernet driver");
252MODULE_LICENSE("GPL"); 250MODULE_LICENSE("GPL");
253 251
254module_param(max_interrupt_work, int, 0);
255module_param(mtu, int, 0); 252module_param(mtu, int, 0);
256module_param(debug, int, 0); 253module_param(debug, int, 0);
257module_param(rx_copybreak, int, 0); 254module_param(rx_copybreak, int, 0);
258module_param_array(options, int, NULL, 0); 255module_param_array(options, int, NULL, 0);
259module_param_array(full_duplex, int, NULL, 0); 256module_param_array(full_duplex, int, NULL, 0);
260MODULE_PARM_DESC(max_interrupt_work,
261 "DP8381x maximum events handled per interrupt");
262MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)"); 257MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)");
263MODULE_PARM_DESC(debug, "DP8381x default debug level"); 258MODULE_PARM_DESC(debug, "DP8381x default debug level");
264MODULE_PARM_DESC(rx_copybreak, 259MODULE_PARM_DESC(rx_copybreak,
@@ -374,7 +369,7 @@ enum pcistuff {
374 369
375 370
376/* array of board data directly indexed by pci_tbl[x].driver_data */ 371/* array of board data directly indexed by pci_tbl[x].driver_data */
377static struct { 372static const struct {
378 const char *name; 373 const char *name;
379 unsigned long flags; 374 unsigned long flags;
380} natsemi_pci_info[] __devinitdata = { 375} natsemi_pci_info[] __devinitdata = {
@@ -691,6 +686,8 @@ struct netdev_private {
691 /* Based on MTU+slack. */ 686 /* Based on MTU+slack. */
692 unsigned int rx_buf_sz; 687 unsigned int rx_buf_sz;
693 int oom; 688 int oom;
689 /* Interrupt status */
690 u32 intr_status;
694 /* Do not touch the nic registers */ 691 /* Do not touch the nic registers */
695 int hands_off; 692 int hands_off;
696 /* external phy that is used: only valid if dev->if_port != PORT_TP */ 693 /* external phy that is used: only valid if dev->if_port != PORT_TP */
@@ -748,7 +745,8 @@ static void init_registers(struct net_device *dev);
748static int start_tx(struct sk_buff *skb, struct net_device *dev); 745static int start_tx(struct sk_buff *skb, struct net_device *dev);
749static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs); 746static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
750static void netdev_error(struct net_device *dev, int intr_status); 747static void netdev_error(struct net_device *dev, int intr_status);
751static void netdev_rx(struct net_device *dev); 748static int natsemi_poll(struct net_device *dev, int *budget);
749static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do);
752static void netdev_tx_done(struct net_device *dev); 750static void netdev_tx_done(struct net_device *dev);
753static int natsemi_change_mtu(struct net_device *dev, int new_mtu); 751static int natsemi_change_mtu(struct net_device *dev, int new_mtu);
754#ifdef CONFIG_NET_POLL_CONTROLLER 752#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -776,6 +774,18 @@ static inline void __iomem *ns_ioaddr(struct net_device *dev)
776 return (void __iomem *) dev->base_addr; 774 return (void __iomem *) dev->base_addr;
777} 775}
778 776
777static inline void natsemi_irq_enable(struct net_device *dev)
778{
779 writel(1, ns_ioaddr(dev) + IntrEnable);
780 readl(ns_ioaddr(dev) + IntrEnable);
781}
782
783static inline void natsemi_irq_disable(struct net_device *dev)
784{
785 writel(0, ns_ioaddr(dev) + IntrEnable);
786 readl(ns_ioaddr(dev) + IntrEnable);
787}
788
779static void move_int_phy(struct net_device *dev, int addr) 789static void move_int_phy(struct net_device *dev, int addr)
780{ 790{
781 struct netdev_private *np = netdev_priv(dev); 791 struct netdev_private *np = netdev_priv(dev);
@@ -879,6 +889,7 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
879 spin_lock_init(&np->lock); 889 spin_lock_init(&np->lock);
880 np->msg_enable = (debug >= 0) ? (1<<debug)-1 : NATSEMI_DEF_MSG; 890 np->msg_enable = (debug >= 0) ? (1<<debug)-1 : NATSEMI_DEF_MSG;
881 np->hands_off = 0; 891 np->hands_off = 0;
892 np->intr_status = 0;
882 893
883 /* Initial port: 894 /* Initial port:
884 * - If the nic was configured to use an external phy and if find_mii 895 * - If the nic was configured to use an external phy and if find_mii
@@ -932,6 +943,9 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
932 dev->do_ioctl = &netdev_ioctl; 943 dev->do_ioctl = &netdev_ioctl;
933 dev->tx_timeout = &tx_timeout; 944 dev->tx_timeout = &tx_timeout;
934 dev->watchdog_timeo = TX_TIMEOUT; 945 dev->watchdog_timeo = TX_TIMEOUT;
946 dev->poll = natsemi_poll;
947 dev->weight = 64;
948
935#ifdef CONFIG_NET_POLL_CONTROLLER 949#ifdef CONFIG_NET_POLL_CONTROLLER
936 dev->poll_controller = &natsemi_poll_controller; 950 dev->poll_controller = &natsemi_poll_controller;
937#endif 951#endif
@@ -1484,6 +1498,31 @@ static void natsemi_reset(struct net_device *dev)
1484 writel(rfcr, ioaddr + RxFilterAddr); 1498 writel(rfcr, ioaddr + RxFilterAddr);
1485} 1499}
1486 1500
1501static void reset_rx(struct net_device *dev)
1502{
1503 int i;
1504 struct netdev_private *np = netdev_priv(dev);
1505 void __iomem *ioaddr = ns_ioaddr(dev);
1506
1507 np->intr_status &= ~RxResetDone;
1508
1509 writel(RxReset, ioaddr + ChipCmd);
1510
1511 for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
1512 np->intr_status |= readl(ioaddr + IntrStatus);
1513 if (np->intr_status & RxResetDone)
1514 break;
1515 udelay(15);
1516 }
1517 if (i==NATSEMI_HW_TIMEOUT) {
1518 printk(KERN_WARNING "%s: RX reset did not complete in %d usec.\n",
1519 dev->name, i*15);
1520 } else if (netif_msg_hw(np)) {
1521 printk(KERN_WARNING "%s: RX reset took %d usec.\n",
1522 dev->name, i*15);
1523 }
1524}
1525
1487static void natsemi_reload_eeprom(struct net_device *dev) 1526static void natsemi_reload_eeprom(struct net_device *dev)
1488{ 1527{
1489 struct netdev_private *np = netdev_priv(dev); 1528 struct netdev_private *np = netdev_priv(dev);
@@ -2158,68 +2197,92 @@ static void netdev_tx_done(struct net_device *dev)
2158 } 2197 }
2159} 2198}
2160 2199
2161/* The interrupt handler does all of the Rx thread work and cleans up 2200/* The interrupt handler doesn't actually handle interrupts itself, it
2162 after the Tx thread. */ 2201 * schedules a NAPI poll if there is anything to do. */
2163static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs) 2202static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
2164{ 2203{
2165 struct net_device *dev = dev_instance; 2204 struct net_device *dev = dev_instance;
2166 struct netdev_private *np = netdev_priv(dev); 2205 struct netdev_private *np = netdev_priv(dev);
2167 void __iomem * ioaddr = ns_ioaddr(dev); 2206 void __iomem * ioaddr = ns_ioaddr(dev);
2168 int boguscnt = max_interrupt_work;
2169 unsigned int handled = 0;
2170 2207
2171 if (np->hands_off) 2208 if (np->hands_off)
2172 return IRQ_NONE; 2209 return IRQ_NONE;
2173 do { 2210
2174 /* Reading automatically acknowledges all int sources. */ 2211 /* Reading automatically acknowledges. */
2175 u32 intr_status = readl(ioaddr + IntrStatus); 2212 np->intr_status = readl(ioaddr + IntrStatus);
2176 2213
2177 if (netif_msg_intr(np)) 2214 if (netif_msg_intr(np))
2178 printk(KERN_DEBUG 2215 printk(KERN_DEBUG
2179 "%s: Interrupt, status %#08x, mask %#08x.\n", 2216 "%s: Interrupt, status %#08x, mask %#08x.\n",
2180 dev->name, intr_status, 2217 dev->name, np->intr_status,
2181 readl(ioaddr + IntrMask)); 2218 readl(ioaddr + IntrMask));
2182 2219
2183 if (intr_status == 0) 2220 if (!np->intr_status)
2184 break; 2221 return IRQ_NONE;
2185 handled = 1;
2186 2222
2187 if (intr_status & 2223 prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]);
2188 (IntrRxDone | IntrRxIntr | RxStatusFIFOOver |
2189 IntrRxErr | IntrRxOverrun)) {
2190 netdev_rx(dev);
2191 }
2192 2224
2193 if (intr_status & 2225 if (netif_rx_schedule_prep(dev)) {
2194 (IntrTxDone | IntrTxIntr | IntrTxIdle | IntrTxErr)) { 2226 /* Disable interrupts and register for poll */
2227 natsemi_irq_disable(dev);
2228 __netif_rx_schedule(dev);
2229 }
2230 return IRQ_HANDLED;
2231}
2232
2233/* This is the NAPI poll routine. As well as the standard RX handling
2234 * it also handles all other interrupts that the chip might raise.
2235 */
2236static int natsemi_poll(struct net_device *dev, int *budget)
2237{
2238 struct netdev_private *np = netdev_priv(dev);
2239 void __iomem * ioaddr = ns_ioaddr(dev);
2240
2241 int work_to_do = min(*budget, dev->quota);
2242 int work_done = 0;
2243
2244 do {
2245 if (np->intr_status &
2246 (IntrTxDone | IntrTxIntr | IntrTxIdle | IntrTxErr)) {
2195 spin_lock(&np->lock); 2247 spin_lock(&np->lock);
2196 netdev_tx_done(dev); 2248 netdev_tx_done(dev);
2197 spin_unlock(&np->lock); 2249 spin_unlock(&np->lock);
2198 } 2250 }
2199 2251
2200 /* Abnormal error summary/uncommon events handlers. */ 2252 /* Abnormal error summary/uncommon events handlers. */
2201 if (intr_status & IntrAbnormalSummary) 2253 if (np->intr_status & IntrAbnormalSummary)
2202 netdev_error(dev, intr_status); 2254 netdev_error(dev, np->intr_status);
2203 2255
2204 if (--boguscnt < 0) { 2256 if (np->intr_status &
2205 if (netif_msg_intr(np)) 2257 (IntrRxDone | IntrRxIntr | RxStatusFIFOOver |
2206 printk(KERN_WARNING 2258 IntrRxErr | IntrRxOverrun)) {
2207 "%s: Too much work at interrupt, " 2259 netdev_rx(dev, &work_done, work_to_do);
2208 "status=%#08x.\n",
2209 dev->name, intr_status);
2210 break;
2211 } 2260 }
2212 } while (1); 2261
2262 *budget -= work_done;
2263 dev->quota -= work_done;
2213 2264
2214 if (netif_msg_intr(np)) 2265 if (work_done >= work_to_do)
2215 printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name); 2266 return 1;
2267
2268 np->intr_status = readl(ioaddr + IntrStatus);
2269 } while (np->intr_status);
2270
2271 netif_rx_complete(dev);
2216 2272
2217 return IRQ_RETVAL(handled); 2273 /* Reenable interrupts providing nothing is trying to shut
2274 * the chip down. */
2275 spin_lock(&np->lock);
2276 if (!np->hands_off && netif_running(dev))
2277 natsemi_irq_enable(dev);
2278 spin_unlock(&np->lock);
2279
2280 return 0;
2218} 2281}
2219 2282
2220/* This routine is logically part of the interrupt handler, but separated 2283/* This routine is logically part of the interrupt handler, but separated
2221 for clarity and better register allocation. */ 2284 for clarity and better register allocation. */
2222static void netdev_rx(struct net_device *dev) 2285static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
2223{ 2286{
2224 struct netdev_private *np = netdev_priv(dev); 2287 struct netdev_private *np = netdev_priv(dev);
2225 int entry = np->cur_rx % RX_RING_SIZE; 2288 int entry = np->cur_rx % RX_RING_SIZE;
@@ -2237,6 +2300,12 @@ static void netdev_rx(struct net_device *dev)
2237 entry, desc_status); 2300 entry, desc_status);
2238 if (--boguscnt < 0) 2301 if (--boguscnt < 0)
2239 break; 2302 break;
2303
2304 if (*work_done >= work_to_do)
2305 break;
2306
2307 (*work_done)++;
2308
2240 pkt_len = (desc_status & DescSizeMask) - 4; 2309 pkt_len = (desc_status & DescSizeMask) - 4;
2241 if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){ 2310 if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){
2242 if (desc_status & DescMore) { 2311 if (desc_status & DescMore) {
@@ -2248,6 +2317,23 @@ static void netdev_rx(struct net_device *dev)
2248 "status %#08x.\n", dev->name, 2317 "status %#08x.\n", dev->name,
2249 np->cur_rx, desc_status); 2318 np->cur_rx, desc_status);
2250 np->stats.rx_length_errors++; 2319 np->stats.rx_length_errors++;
2320
2321 /* The RX state machine has probably
2322 * locked up beneath us. Follow the
2323 * reset procedure documented in
2324 * AN-1287. */
2325
2326 spin_lock_irq(&np->lock);
2327 reset_rx(dev);
2328 reinit_rx(dev);
2329 writel(np->ring_dma, ioaddr + RxRingPtr);
2330 check_link(dev);
2331 spin_unlock_irq(&np->lock);
2332
2333 /* We'll enable RX on exit from this
2334 * function. */
2335 break;
2336
2251 } else { 2337 } else {
2252 /* There was an error. */ 2338 /* There was an error. */
2253 np->stats.rx_errors++; 2339 np->stats.rx_errors++;
@@ -2293,7 +2379,7 @@ static void netdev_rx(struct net_device *dev)
2293 np->rx_skbuff[entry] = NULL; 2379 np->rx_skbuff[entry] = NULL;
2294 } 2380 }
2295 skb->protocol = eth_type_trans(skb, dev); 2381 skb->protocol = eth_type_trans(skb, dev);
2296 netif_rx(skb); 2382 netif_receive_skb(skb);
2297 dev->last_rx = jiffies; 2383 dev->last_rx = jiffies;
2298 np->stats.rx_packets++; 2384 np->stats.rx_packets++;
2299 np->stats.rx_bytes += pkt_len; 2385 np->stats.rx_bytes += pkt_len;
@@ -3074,9 +3160,7 @@ static int netdev_close(struct net_device *dev)
3074 del_timer_sync(&np->timer); 3160 del_timer_sync(&np->timer);
3075 disable_irq(dev->irq); 3161 disable_irq(dev->irq);
3076 spin_lock_irq(&np->lock); 3162 spin_lock_irq(&np->lock);
3077 /* Disable interrupts, and flush posted writes */ 3163 natsemi_irq_disable(dev);
3078 writel(0, ioaddr + IntrEnable);
3079 readl(ioaddr + IntrEnable);
3080 np->hands_off = 1; 3164 np->hands_off = 1;
3081 spin_unlock_irq(&np->lock); 3165 spin_unlock_irq(&np->lock);
3082 enable_irq(dev->irq); 3166 enable_irq(dev->irq);
@@ -3158,6 +3242,9 @@ static void __devexit natsemi_remove1 (struct pci_dev *pdev)
3158 * * netdev_timer: timer stopped by natsemi_suspend. 3242 * * netdev_timer: timer stopped by natsemi_suspend.
3159 * * intr_handler: doesn't acquire the spinlock. suspend calls 3243 * * intr_handler: doesn't acquire the spinlock. suspend calls
3160 * disable_irq() to enforce synchronization. 3244 * disable_irq() to enforce synchronization.
3245 * * natsemi_poll: checks before reenabling interrupts. suspend
3246 * sets hands_off, disables interrupts and then waits with
3247 * netif_poll_disable().
3161 * 3248 *
3162 * Interrupts must be disabled, otherwise hands_off can cause irq storms. 3249 * Interrupts must be disabled, otherwise hands_off can cause irq storms.
3163 */ 3250 */
@@ -3183,6 +3270,8 @@ static int natsemi_suspend (struct pci_dev *pdev, pm_message_t state)
3183 spin_unlock_irq(&np->lock); 3270 spin_unlock_irq(&np->lock);
3184 enable_irq(dev->irq); 3271 enable_irq(dev->irq);
3185 3272
3273 netif_poll_disable(dev);
3274
3186 /* Update the error counts. */ 3275 /* Update the error counts. */
3187 __get_stats(dev); 3276 __get_stats(dev);
3188 3277
@@ -3235,6 +3324,7 @@ static int natsemi_resume (struct pci_dev *pdev)
3235 mod_timer(&np->timer, jiffies + 1*HZ); 3324 mod_timer(&np->timer, jiffies + 1*HZ);
3236 } 3325 }
3237 netif_device_attach(dev); 3326 netif_device_attach(dev);
3327 netif_poll_enable(dev);
3238out: 3328out:
3239 rtnl_unlock(); 3329 rtnl_unlock();
3240 return 0; 3330 return 0;
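The natsemi changes above convert the driver to the 2.6.16-era NAPI interface: the interrupt handler only latches the cause bits, masks the chip and schedules dev->poll, and the poll routine does the actual RX/TX work within its budget before re-enabling interrupts. A minimal sketch of that pattern, with the chip-specific helpers (example_*) left hypothetical:

	static irqreturn_t example_intr(int irq, void *dev_instance, struct pt_regs *regs)
	{
		struct net_device *dev = dev_instance;

		if (!example_irq_pending(dev))          /* hypothetical: read and ack cause bits */
			return IRQ_NONE;

		if (netif_rx_schedule_prep(dev)) {
			example_irq_disable(dev);       /* mask the chip */
			__netif_rx_schedule(dev);       /* queue dev->poll */
		}
		return IRQ_HANDLED;
	}

	static int example_poll(struct net_device *dev, int *budget)
	{
		int work_to_do = min(*budget, dev->quota);
		int work_done = example_rx(dev, work_to_do);    /* hypothetical RX loop */

		*budget -= work_done;
		dev->quota -= work_done;

		if (work_done >= work_to_do)
			return 1;                       /* ring not drained: stay on the poll list */

		netif_rx_complete(dev);
		example_irq_enable(dev);                /* unmask the chip */
		return 0;
	}

	/* in the probe routine: */
	dev->poll = example_poll;
	dev->weight = 64;

In-poll delivery uses netif_receive_skb() instead of netif_rx(), since the packet is already being processed in softirq context.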
diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c
index 8f40368cf2e9..aaebd28a1920 100644
--- a/drivers/net/ne-h8300.c
+++ b/drivers/net/ne-h8300.c
@@ -27,6 +27,7 @@ static const char version1[] =
27#include <linux/delay.h> 27#include <linux/delay.h>
28#include <linux/netdevice.h> 28#include <linux/netdevice.h>
29#include <linux/etherdevice.h> 29#include <linux/etherdevice.h>
30#include <linux/jiffies.h>
30 31
31#include <asm/system.h> 32#include <asm/system.h>
32#include <asm/io.h> 33#include <asm/io.h>
@@ -365,7 +366,7 @@ static void ne_reset_8390(struct net_device *dev)
365 366
366 /* This check _should_not_ be necessary, omit eventually. */ 367 /* This check _should_not_ be necessary, omit eventually. */
367 while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0) 368 while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
368 if (jiffies - reset_start_time > 2*HZ/100) { 369 if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
369 printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name); 370 printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name);
370 break; 371 break;
371 } 372 }
@@ -580,7 +581,7 @@ retry:
580#endif 581#endif
581 582
582 while ((inb_p(NE_BASE + EN0_ISR) & ENISR_RDC) == 0) 583 while ((inb_p(NE_BASE + EN0_ISR) & ENISR_RDC) == 0)
583 if (jiffies - dma_start > 2*HZ/100) { /* 20ms */ 584 if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */
584 printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name); 585 printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name);
585 ne_reset_8390(dev); 586 ne_reset_8390(dev);
586 NS8390_init(dev,1); 587 NS8390_init(dev,1);
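This and the following ne*, ns83820 and oaknet hunks switch open-coded jiffies comparisons to the time_after() family. The helpers compute a signed difference, so the test stays correct across a jiffies wrap, unlike a direct `jiffies > deadline` comparison, and the real macros also typecheck both operands. A simplified sketch (the actual definitions live in include/linux/jiffies.h):

	#define example_time_after(a, b)  ((long)(b) - (long)(a) < 0)

	unsigned long deadline = jiffies + 2*HZ/100;   /* roughly 20 ms */

	while (!example_chip_ready())                  /* hypothetical poll of a status bit */
		if (example_time_after(jiffies, deadline))
			break;                         /* timed out */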
diff --git a/drivers/net/ne.c b/drivers/net/ne.c
index 94f782d51f0f..08b218c5bfbc 100644
--- a/drivers/net/ne.c
+++ b/drivers/net/ne.c
@@ -50,6 +50,7 @@ static const char version2[] =
50#include <linux/delay.h> 50#include <linux/delay.h>
51#include <linux/netdevice.h> 51#include <linux/netdevice.h>
52#include <linux/etherdevice.h> 52#include <linux/etherdevice.h>
53#include <linux/jiffies.h>
53 54
54#include <asm/system.h> 55#include <asm/system.h>
55#include <asm/io.h> 56#include <asm/io.h>
@@ -341,7 +342,7 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr)
341 outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET); 342 outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET);
342 343
343 while ((inb_p(ioaddr + EN0_ISR) & ENISR_RESET) == 0) 344 while ((inb_p(ioaddr + EN0_ISR) & ENISR_RESET) == 0)
344 if (jiffies - reset_start_time > 2*HZ/100) { 345 if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
345 if (bad_card) { 346 if (bad_card) {
346 printk(" (warning: no reset ack)"); 347 printk(" (warning: no reset ack)");
347 break; 348 break;
@@ -580,7 +581,7 @@ static void ne_reset_8390(struct net_device *dev)
580 581
581 /* This check _should_not_ be necessary, omit eventually. */ 582 /* This check _should_not_ be necessary, omit eventually. */
582 while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0) 583 while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
583 if (jiffies - reset_start_time > 2*HZ/100) { 584 if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
584 printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name); 585 printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name);
585 break; 586 break;
586 } 587 }
@@ -787,7 +788,7 @@ retry:
787#endif 788#endif
788 789
789 while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0) 790 while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0)
790 if (jiffies - dma_start > 2*HZ/100) { /* 20ms */ 791 if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */
791 printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name); 792 printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name);
792 ne_reset_8390(dev); 793 ne_reset_8390(dev);
793 NS8390_init(dev,1); 794 NS8390_init(dev,1);
diff --git a/drivers/net/ne2.c b/drivers/net/ne2.c
index e6df375a1d4b..2aa7b77f84f8 100644
--- a/drivers/net/ne2.c
+++ b/drivers/net/ne2.c
@@ -75,6 +75,7 @@ static const char *version = "ne2.c:v0.91 Nov 16 1998 Wim Dumon <wimpie@kotnet.o
75#include <linux/etherdevice.h> 75#include <linux/etherdevice.h>
76#include <linux/skbuff.h> 76#include <linux/skbuff.h>
77#include <linux/bitops.h> 77#include <linux/bitops.h>
78#include <linux/jiffies.h>
78 79
79#include <asm/system.h> 80#include <asm/system.h>
80#include <asm/io.h> 81#include <asm/io.h>
@@ -395,7 +396,7 @@ static int __init ne2_probe1(struct net_device *dev, int slot)
395 outb(inb(base_addr + NE_RESET), base_addr + NE_RESET); 396 outb(inb(base_addr + NE_RESET), base_addr + NE_RESET);
396 397
397 while ((inb_p(base_addr + EN0_ISR) & ENISR_RESET) == 0) 398 while ((inb_p(base_addr + EN0_ISR) & ENISR_RESET) == 0)
398 if (jiffies - reset_start_time > 2*HZ/100) { 399 if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
399 printk(" not found (no reset ack).\n"); 400 printk(" not found (no reset ack).\n");
400 retval = -ENODEV; 401 retval = -ENODEV;
401 goto out; 402 goto out;
@@ -548,7 +549,7 @@ static void ne_reset_8390(struct net_device *dev)
548 549
549 /* This check _should_not_ be necessary, omit eventually. */ 550 /* This check _should_not_ be necessary, omit eventually. */
550 while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0) 551 while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
551 if (jiffies - reset_start_time > 2*HZ/100) { 552 if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
552 printk("%s: ne_reset_8390() did not complete.\n", 553 printk("%s: ne_reset_8390() did not complete.\n",
553 dev->name); 554 dev->name);
554 break; 555 break;
@@ -749,7 +750,7 @@ retry:
749#endif 750#endif
750 751
751 while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0) 752 while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0)
752 if (jiffies - dma_start > 2*HZ/100) { /* 20ms */ 753 if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */
753 printk("%s: timeout waiting for Tx RDC.\n", dev->name); 754 printk("%s: timeout waiting for Tx RDC.\n", dev->name);
754 ne_reset_8390(dev); 755 ne_reset_8390(dev);
755 NS8390_init(dev,1); 756 NS8390_init(dev,1);
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index d11821dd86ed..e3ebb5803b02 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -117,7 +117,7 @@ enum ne2k_pci_chipsets {
117}; 117};
118 118
119 119
120static struct { 120static const struct {
121 char *name; 121 char *name;
122 int flags; 122 int flags;
123} pci_clone_list[] __devinitdata = { 123} pci_clone_list[] __devinitdata = {
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index b0c3b6ab6263..0fede50abd3e 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -116,6 +116,7 @@
116#include <linux/timer.h> 116#include <linux/timer.h>
117#include <linux/if_vlan.h> 117#include <linux/if_vlan.h>
118#include <linux/rtnetlink.h> 118#include <linux/rtnetlink.h>
119#include <linux/jiffies.h>
119 120
120#include <asm/io.h> 121#include <asm/io.h>
121#include <asm/uaccess.h> 122#include <asm/uaccess.h>
@@ -651,7 +652,7 @@ static void FASTCALL(phy_intr(struct net_device *ndev));
651static void fastcall phy_intr(struct net_device *ndev) 652static void fastcall phy_intr(struct net_device *ndev)
652{ 653{
653 struct ns83820 *dev = PRIV(ndev); 654 struct ns83820 *dev = PRIV(ndev);
654 static char *speeds[] = { "10", "100", "1000", "1000(?)", "1000F" }; 655 static const char *speeds[] = { "10", "100", "1000", "1000(?)", "1000F" };
655 u32 cfg, new_cfg; 656 u32 cfg, new_cfg;
656 u32 tbisr, tanar, tanlpar; 657 u32 tbisr, tanar, tanlpar;
657 int speed, fullduplex, newlinkstate; 658 int speed, fullduplex, newlinkstate;
@@ -1607,7 +1608,7 @@ static void ns83820_run_bist(struct net_device *ndev, const char *name, u32 enab
1607{ 1608{
1608 struct ns83820 *dev = PRIV(ndev); 1609 struct ns83820 *dev = PRIV(ndev);
1609 int timed_out = 0; 1610 int timed_out = 0;
1610 long start; 1611 unsigned long start;
1611 u32 status; 1612 u32 status;
1612 int loops = 0; 1613 int loops = 0;
1613 1614
@@ -1625,7 +1626,7 @@ static void ns83820_run_bist(struct net_device *ndev, const char *name, u32 enab
1625 break; 1626 break;
1626 if (status & fail) 1627 if (status & fail)
1627 break; 1628 break;
1628 if ((jiffies - start) >= HZ) { 1629 if (time_after_eq(jiffies, start + HZ)) {
1629 timed_out = 1; 1630 timed_out = 1;
1630 break; 1631 break;
1631 } 1632 }
diff --git a/drivers/net/oaknet.c b/drivers/net/oaknet.c
index 62167a29debe..d0f686d6eaaa 100644
--- a/drivers/net/oaknet.c
+++ b/drivers/net/oaknet.c
@@ -20,6 +20,7 @@
20#include <linux/netdevice.h> 20#include <linux/netdevice.h>
21#include <linux/etherdevice.h> 21#include <linux/etherdevice.h>
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/jiffies.h>
23 24
24#include <asm/board.h> 25#include <asm/board.h>
25#include <asm/io.h> 26#include <asm/io.h>
@@ -606,7 +607,7 @@ retry:
606#endif 607#endif
607 608
608 while ((ei_ibp(base + EN0_ISR) & ENISR_RDC) == 0) { 609 while ((ei_ibp(base + EN0_ISR) & ENISR_RDC) == 0) {
609 if (jiffies - start > OAKNET_WAIT) { 610 if (time_after(jiffies, start + OAKNET_WAIT)) {
610 printk("%s: timeout waiting for Tx RDC.\n", dev->name); 611 printk("%s: timeout waiting for Tx RDC.\n", dev->name);
611 oaknet_reset_8390(dev); 612 oaknet_reset_8390(dev);
612 NS8390_init(dev, TRUE); 613 NS8390_init(dev, TRUE);
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index 48774efeec71..ce90becb8bdf 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -341,7 +341,7 @@ static void tc574_detach(struct pcmcia_device *p_dev)
341#define CS_CHECK(fn, ret) \ 341#define CS_CHECK(fn, ret) \
342 do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) 342 do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
343 343
344static char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; 344static const char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
345 345
346static void tc574_config(dev_link_t *link) 346static void tc574_config(dev_link_t *link)
347{ 347{
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index 1c3c9c666f74..3dba50849da7 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -39,6 +39,7 @@
39#include <linux/if_arp.h> 39#include <linux/if_arp.h>
40#include <linux/ioport.h> 40#include <linux/ioport.h>
41#include <linux/bitops.h> 41#include <linux/bitops.h>
42#include <linux/jiffies.h>
42 43
43#include <pcmcia/cs_types.h> 44#include <pcmcia/cs_types.h>
44#include <pcmcia/cs.h> 45#include <pcmcia/cs.h>
@@ -115,7 +116,7 @@ struct el3_private {
115 spinlock_t lock; 116 spinlock_t lock;
116}; 117};
117 118
118static char *if_names[] = { "auto", "10baseT", "10base2", "AUI" }; 119static const char *if_names[] = { "auto", "10baseT", "10base2", "AUI" };
119 120
120/*====================================================================*/ 121/*====================================================================*/
121 122
@@ -796,7 +797,7 @@ static void media_check(unsigned long arg)
796 media = inw(ioaddr+WN4_MEDIA) & 0xc810; 797 media = inw(ioaddr+WN4_MEDIA) & 0xc810;
797 798
798 /* Ignore collisions unless we've had no irq's recently */ 799 /* Ignore collisions unless we've had no irq's recently */
799 if (jiffies - lp->last_irq < HZ) { 800 if (time_before(jiffies, lp->last_irq + HZ)) {
800 media &= ~0x0010; 801 media &= ~0x0010;
801 } else { 802 } else {
802 /* Try harder to detect carrier errors */ 803 /* Try harder to detect carrier errors */
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 28fe2fb4d6c0..b7ac14ba8877 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -309,7 +309,7 @@ do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
309static int mfc_try_io_port(dev_link_t *link) 309static int mfc_try_io_port(dev_link_t *link)
310{ 310{
311 int i, ret; 311 int i, ret;
312 static kio_addr_t serial_base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 }; 312 static const kio_addr_t serial_base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 };
313 313
314 for (i = 0; i < 5; i++) { 314 for (i = 0; i < 5; i++) {
315 link->io.BasePort2 = serial_base[i]; 315 link->io.BasePort2 = serial_base[i];
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index 4a232254a497..787176c57fd9 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -388,7 +388,7 @@ static char *version =
388DRV_NAME " " DRV_VERSION " (Roger C. Pao)"; 388DRV_NAME " " DRV_VERSION " (Roger C. Pao)";
389#endif 389#endif
390 390
391static char *if_names[]={ 391static const char *if_names[]={
392 "Auto", "10baseT", "BNC", 392 "Auto", "10baseT", "BNC",
393}; 393};
394 394
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index d85b758f3efa..b46e5f703efa 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -66,7 +66,7 @@
66 66
67#define PCNET_RDC_TIMEOUT (2*HZ/100) /* Max wait in jiffies for Tx RDC */ 67#define PCNET_RDC_TIMEOUT (2*HZ/100) /* Max wait in jiffies for Tx RDC */
68 68
69static char *if_names[] = { "auto", "10baseT", "10base2"}; 69static const char *if_names[] = { "auto", "10baseT", "10base2"};
70 70
71#ifdef PCMCIA_DEBUG 71#ifdef PCMCIA_DEBUG
72static int pc_debug = PCMCIA_DEBUG; 72static int pc_debug = PCMCIA_DEBUG;
@@ -1727,6 +1727,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
1727 PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100 V2)", 0x0733cc81, 0x3a3b28e9), 1727 PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100 V2)", 0x0733cc81, 0x3a3b28e9),
1728 PCMCIA_DEVICE_PROD_ID12("Linksys", "HomeLink Phoneline + 10/100 Network PC Card (PCM100H1)", 0x733cc81, 0x7a3e5c3a), 1728 PCMCIA_DEVICE_PROD_ID12("Linksys", "HomeLink Phoneline + 10/100 Network PC Card (PCM100H1)", 0x733cc81, 0x7a3e5c3a),
1729 PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TX", 0x88fcdeda, 0x6d772737), 1729 PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TX", 0x88fcdeda, 0x6d772737),
1730 PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TE", 0x88fcdeda, 0x0e714bee),
1730 PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN20T", 0x88fcdeda, 0x81090922), 1731 PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN20T", 0x88fcdeda, 0x81090922),
1731 PCMCIA_DEVICE_PROD_ID12("LONGSHINE", "PCMCIA Ethernet Card", 0xf866b0b0, 0x6f6652e0), 1732 PCMCIA_DEVICE_PROD_ID12("LONGSHINE", "PCMCIA Ethernet Card", 0xf866b0b0, 0x6f6652e0),
1732 PCMCIA_DEVICE_PROD_ID12("MACNICA", "ME1-JEIDA", 0x20841b68, 0xaf8a3578), 1733 PCMCIA_DEVICE_PROD_ID12("MACNICA", "ME1-JEIDA", 0x20841b68, 0xaf8a3578),
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 0122415dfeef..8839c4faafd6 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -59,7 +59,7 @@
59 59
60/*====================================================================*/ 60/*====================================================================*/
61 61
62static char *if_names[] = { "auto", "10baseT", "10base2"}; 62static const char *if_names[] = { "auto", "10baseT", "10base2"};
63 63
64/* Module parameters */ 64/* Module parameters */
65 65
@@ -777,7 +777,7 @@ free_cfg_mem:
777static int osi_config(dev_link_t *link) 777static int osi_config(dev_link_t *link)
778{ 778{
779 struct net_device *dev = link->priv; 779 struct net_device *dev = link->priv;
780 static kio_addr_t com[4] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8 }; 780 static const kio_addr_t com[4] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8 };
781 int i, j; 781 int i, j;
782 782
783 link->conf.Attributes |= CONF_ENABLE_SPKR; 783 link->conf.Attributes |= CONF_ENABLE_SPKR;
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index 593d8adee891..eed496803fe4 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -208,7 +208,7 @@ enum xirc_cmd { /* Commands */
208#define XIRCREG45_REV 15 /* Revision Register (rd) */ 208#define XIRCREG45_REV 15 /* Revision Register (rd) */
209#define XIRCREG50_IA 8 /* Individual Address (8-13) */ 209#define XIRCREG50_IA 8 /* Individual Address (8-13) */
210 210
211static char *if_names[] = { "Auto", "10BaseT", "10Base2", "AUI", "100BaseT" }; 211static const char *if_names[] = { "Auto", "10BaseT", "10Base2", "AUI", "100BaseT" };
212 212
213/**************** 213/****************
214 * All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If 214 * All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 8f6cf8c896a4..7e900572eaf8 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -26,7 +26,7 @@
26#define DRV_RELDATE "01.Nov.2005" 26#define DRV_RELDATE "01.Nov.2005"
27#define PFX DRV_NAME ": " 27#define PFX DRV_NAME ": "
28 28
29static const char *version = 29static const char * const version =
30DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n"; 30DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n";
31 31
32#include <linux/module.h> 32#include <linux/module.h>
@@ -109,7 +109,7 @@ static int rx_copybreak = 200;
109 * table to translate option values from tulip 109 * table to translate option values from tulip
110 * to internal options 110 * to internal options
111 */ 111 */
112static unsigned char options_mapping[] = { 112static const unsigned char options_mapping[] = {
113 PCNET32_PORT_ASEL, /* 0 Auto-select */ 113 PCNET32_PORT_ASEL, /* 0 Auto-select */
114 PCNET32_PORT_AUI, /* 1 BNC/AUI */ 114 PCNET32_PORT_AUI, /* 1 BNC/AUI */
115 PCNET32_PORT_AUI, /* 2 AUI/BNC */ 115 PCNET32_PORT_AUI, /* 2 AUI/BNC */
@@ -733,7 +733,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t *data1)
733 int rc; /* return code */ 733 int rc; /* return code */
734 int size; /* size of packets */ 734 int size; /* size of packets */
735 unsigned char *packet; /* source packet data */ 735 unsigned char *packet; /* source packet data */
736 static int data_len = 60; /* length of source packets */ 736 static const int data_len = 60; /* length of source packets */
737 unsigned long flags; 737 unsigned long flags;
738 unsigned long ticks; 738 unsigned long ticks;
739 739
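Several of the hunks above add const to static lookup tables and strings (if_names[], serial_base[], natsemi_pci_info[], options_mapping[]), letting them live in read-only data. Two levels appear: const on the characters alone still leaves the pointer array writable, while the pcnet32 version string is fully const. A small sketch of the distinction:

	static const char *example_if_names[] = { "auto", "10baseT" };
		/* strings are read-only; the array of pointers can still be modified */

	static const char * const example_version =
		"example.c:v1.0\n";
		/* both the string and the pointer are read-only */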
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 1474b7c5ac0b..33cec2dab942 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -132,7 +132,7 @@ struct phy_setting {
132}; 132};
133 133
134/* A mapping of all SUPPORTED settings to speed/duplex */ 134/* A mapping of all SUPPORTED settings to speed/duplex */
135static struct phy_setting settings[] = { 135static const struct phy_setting settings[] = {
136 { 136 {
137 .speed = 10000, 137 .speed = 10000,
138 .duplex = DUPLEX_FULL, 138 .duplex = DUPLEX_FULL,
diff --git a/drivers/net/plip.c b/drivers/net/plip.c
index 87ee3271b17d..d4449d6d1fe4 100644
--- a/drivers/net/plip.c
+++ b/drivers/net/plip.c
@@ -123,7 +123,7 @@ static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n"
123#ifndef NET_DEBUG 123#ifndef NET_DEBUG
124#define NET_DEBUG 1 124#define NET_DEBUG 1
125#endif 125#endif
126static unsigned int net_debug = NET_DEBUG; 126static const unsigned int net_debug = NET_DEBUG;
127 127
128#define ENABLE(irq) if (irq != -1) enable_irq(irq) 128#define ENABLE(irq) if (irq != -1) enable_irq(irq)
129#define DISABLE(irq) if (irq != -1) disable_irq(irq) 129#define DISABLE(irq) if (irq != -1) disable_irq(irq)
@@ -351,7 +351,7 @@ static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
351typedef int (*plip_func)(struct net_device *dev, struct net_local *nl, 351typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
352 struct plip_local *snd, struct plip_local *rcv); 352 struct plip_local *snd, struct plip_local *rcv);
353 353
354static plip_func connection_state_table[] = 354static const plip_func connection_state_table[] =
355{ 355{
356 plip_none, 356 plip_none,
357 plip_receive_packet, 357 plip_receive_packet,
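The plip hunk makes its state-dispatch table const as well; the table is an array of function pointers indexed by connection state. A minimal sketch of that dispatch style, with hypothetical states:

	typedef int (*example_state_fn)(int arg);

	static int example_idle(int arg)    { return 0; }
	static int example_receive(int arg) { return arg; }

	static const example_state_fn example_state_table[] = {
		example_idle,       /* state 0 */
		example_receive,    /* state 1 */
	};

	/* dispatch on the current state index */
	ret = example_state_table[state](arg);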
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index aa6540b39466..23659fd7c3a6 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -30,6 +30,7 @@
30#include <linux/ppp_channel.h> 30#include <linux/ppp_channel.h>
31#include <linux/spinlock.h> 31#include <linux/spinlock.h>
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/jiffies.h>
33#include <asm/uaccess.h> 34#include <asm/uaccess.h>
34#include <asm/string.h> 35#include <asm/string.h>
35 36
@@ -570,7 +571,7 @@ ppp_async_encode(struct asyncppp *ap)
570 * character if necessary. 571 * character if necessary.
571 */ 572 */
572 if (islcp || flag_time == 0 573 if (islcp || flag_time == 0
573 || jiffies - ap->last_xmit >= flag_time) 574 || time_after_eq(jiffies, ap->last_xmit + flag_time))
574 *buf++ = PPP_FLAG; 575 *buf++ = PPP_FLAG;
575 ap->last_xmit = jiffies; 576 ap->last_xmit = jiffies;
576 fcs = PPP_INITFCS; 577 fcs = PPP_INITFCS;
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index 33cb8254e79d..33255fe8031e 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -108,7 +108,7 @@ static void
108ppp_print_hex (register __u8 * out, const __u8 * in, int count) 108ppp_print_hex (register __u8 * out, const __u8 * in, int count)
109{ 109{
110 register __u8 next_ch; 110 register __u8 next_ch;
111 static char hex[] = "0123456789ABCDEF"; 111 static const char hex[] = "0123456789ABCDEF";
112 112
113 while (count-- > 0) { 113 while (count-- > 0) {
114 next_ch = *in++; 114 next_ch = *in++;
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 8cc0d0bbdf50..0ad3310290f1 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -113,11 +113,11 @@ static int media[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
113static int num_media = 0; 113static int num_media = 0;
114 114
115/* Maximum events (Rx packets, etc.) to handle at each interrupt. */ 115/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
116static int max_interrupt_work = 20; 116static const int max_interrupt_work = 20;
117 117
118/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). 118/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
119 The RTL chips use a 64 element hash table based on the Ethernet CRC. */ 119 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
120static int multicast_filter_limit = 32; 120static const int multicast_filter_limit = 32;
121 121
122/* MAC address length */ 122/* MAC address length */
123#define MAC_ADDR_LEN 6 123#define MAC_ADDR_LEN 6
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index b7f00d6eb6a6..79208f434ac1 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -57,23 +57,27 @@
57#include <linux/ethtool.h> 57#include <linux/ethtool.h>
58#include <linux/workqueue.h> 58#include <linux/workqueue.h>
59#include <linux/if_vlan.h> 59#include <linux/if_vlan.h>
60#include <linux/ip.h>
61#include <linux/tcp.h>
62#include <net/tcp.h>
60 63
61#include <asm/system.h> 64#include <asm/system.h>
62#include <asm/uaccess.h> 65#include <asm/uaccess.h>
63#include <asm/io.h> 66#include <asm/io.h>
67#include <asm/div64.h>
64 68
65/* local include */ 69/* local include */
66#include "s2io.h" 70#include "s2io.h"
67#include "s2io-regs.h" 71#include "s2io-regs.h"
68 72
69#define DRV_VERSION "Version 2.0.9.4" 73#define DRV_VERSION "2.0.11.2"
70 74
71/* S2io Driver name & version. */ 75/* S2io Driver name & version. */
72static char s2io_driver_name[] = "Neterion"; 76static char s2io_driver_name[] = "Neterion";
73static char s2io_driver_version[] = DRV_VERSION; 77static char s2io_driver_version[] = DRV_VERSION;
74 78
75int rxd_size[4] = {32,48,48,64}; 79static int rxd_size[4] = {32,48,48,64};
76int rxd_count[4] = {127,85,85,63}; 80static int rxd_count[4] = {127,85,85,63};
77 81
78static inline int RXD_IS_UP2DT(RxD_t *rxdp) 82static inline int RXD_IS_UP2DT(RxD_t *rxdp)
79{ 83{
@@ -168,6 +172,11 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
168 {"\n DRIVER STATISTICS"}, 172 {"\n DRIVER STATISTICS"},
169 {"single_bit_ecc_errs"}, 173 {"single_bit_ecc_errs"},
170 {"double_bit_ecc_errs"}, 174 {"double_bit_ecc_errs"},
175 ("lro_aggregated_pkts"),
176 ("lro_flush_both_count"),
177 ("lro_out_of_sequence_pkts"),
178 ("lro_flush_due_to_max_pkts"),
179 ("lro_avg_aggr_pkts"),
171}; 180};
172 181
173#define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN 182#define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN
@@ -214,7 +223,7 @@ static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
214#define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL 223#define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
215#define END_SIGN 0x0 224#define END_SIGN 0x0
216 225
217static u64 herc_act_dtx_cfg[] = { 226static const u64 herc_act_dtx_cfg[] = {
218 /* Set address */ 227 /* Set address */
219 0x8000051536750000ULL, 0x80000515367500E0ULL, 228 0x8000051536750000ULL, 0x80000515367500E0ULL,
220 /* Write data */ 229 /* Write data */
@@ -235,7 +244,7 @@ static u64 herc_act_dtx_cfg[] = {
235 END_SIGN 244 END_SIGN
236}; 245};
237 246
238static u64 xena_mdio_cfg[] = { 247static const u64 xena_mdio_cfg[] = {
239 /* Reset PMA PLL */ 248 /* Reset PMA PLL */
240 0xC001010000000000ULL, 0xC0010100000000E0ULL, 249 0xC001010000000000ULL, 0xC0010100000000E0ULL,
241 0xC0010100008000E4ULL, 250 0xC0010100008000E4ULL,
@@ -245,7 +254,7 @@ static u64 xena_mdio_cfg[] = {
245 END_SIGN 254 END_SIGN
246}; 255};
247 256
248static u64 xena_dtx_cfg[] = { 257static const u64 xena_dtx_cfg[] = {
249 0x8000051500000000ULL, 0x80000515000000E0ULL, 258 0x8000051500000000ULL, 0x80000515000000E0ULL,
250 0x80000515D93500E4ULL, 0x8001051500000000ULL, 259 0x80000515D93500E4ULL, 0x8001051500000000ULL,
251 0x80010515000000E0ULL, 0x80010515001E00E4ULL, 260 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
@@ -273,7 +282,7 @@ static u64 xena_dtx_cfg[] = {
273 * Constants for Fixing the MacAddress problem seen mostly on 282 * Constants for Fixing the MacAddress problem seen mostly on
274 * Alpha machines. 283 * Alpha machines.
275 */ 284 */
276static u64 fix_mac[] = { 285static const u64 fix_mac[] = {
277 0x0060000000000000ULL, 0x0060600000000000ULL, 286 0x0060000000000000ULL, 0x0060600000000000ULL,
278 0x0040600000000000ULL, 0x0000600000000000ULL, 287 0x0040600000000000ULL, 0x0000600000000000ULL,
279 0x0020600000000000ULL, 0x0060600000000000ULL, 288 0x0020600000000000ULL, 0x0060600000000000ULL,
@@ -317,6 +326,12 @@ static unsigned int indicate_max_pkts;
317static unsigned int rxsync_frequency = 3; 326static unsigned int rxsync_frequency = 3;
318/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */ 327/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
319static unsigned int intr_type = 0; 328static unsigned int intr_type = 0;
329/* Large receive offload feature */
330static unsigned int lro = 0;
331/* Max pkts to be aggregated by LRO at one time. If not specified,
332 * aggregation happens until we hit max IP pkt size(64K)
333 */
334static unsigned int lro_max_pkts = 0xFFFF;
320 335
321/* 336/*
322 * S2IO device table. 337 * S2IO device table.
@@ -1476,6 +1491,19 @@ static int init_nic(struct s2io_nic *nic)
1476 writel((u32) (val64 >> 32), (add + 4)); 1491 writel((u32) (val64 >> 32), (add + 4));
1477 val64 = readq(&bar0->mac_cfg); 1492 val64 = readq(&bar0->mac_cfg);
1478 1493
1494 /* Enable FCS stripping by adapter */
1495 add = &bar0->mac_cfg;
1496 val64 = readq(&bar0->mac_cfg);
1497 val64 |= MAC_CFG_RMAC_STRIP_FCS;
1498 if (nic->device_type == XFRAME_II_DEVICE)
1499 writeq(val64, &bar0->mac_cfg);
1500 else {
1501 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1502 writel((u32) (val64), add);
1503 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1504 writel((u32) (val64 >> 32), (add + 4));
1505 }
1506
1479 /* 1507 /*
1480 * Set the time value to be inserted in the pause frame 1508 * Set the time value to be inserted in the pause frame
1481 * generated by xena. 1509 * generated by xena.
@@ -2127,7 +2155,7 @@ static void stop_nic(struct s2io_nic *nic)
2127 } 2155 }
2128} 2156}
2129 2157
2130int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb) 2158static int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb)
2131{ 2159{
2132 struct net_device *dev = nic->dev; 2160 struct net_device *dev = nic->dev;
2133 struct sk_buff *frag_list; 2161 struct sk_buff *frag_list;
@@ -2569,6 +2597,8 @@ static void rx_intr_handler(ring_info_t *ring_data)
2569#ifndef CONFIG_S2IO_NAPI 2597#ifndef CONFIG_S2IO_NAPI
2570 int pkt_cnt = 0; 2598 int pkt_cnt = 0;
2571#endif 2599#endif
2600 int i;
2601
2572 spin_lock(&nic->rx_lock); 2602 spin_lock(&nic->rx_lock);
2573 if (atomic_read(&nic->card_state) == CARD_DOWN) { 2603 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2574 DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n", 2604 DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
@@ -2661,6 +2691,18 @@ static void rx_intr_handler(ring_info_t *ring_data)
2661 break; 2691 break;
2662#endif 2692#endif
2663 } 2693 }
2694 if (nic->lro) {
2695 /* Clear all LRO sessions before exiting */
2696 for (i=0; i<MAX_LRO_SESSIONS; i++) {
2697 lro_t *lro = &nic->lro0_n[i];
2698 if (lro->in_use) {
2699 update_L3L4_header(nic, lro);
2700 queue_rx_frame(lro->parent);
2701 clear_lro_session(lro);
2702 }
2703 }
2704 }
2705
2664 spin_unlock(&nic->rx_lock); 2706 spin_unlock(&nic->rx_lock);
2665} 2707}
2666 2708
@@ -2852,7 +2894,7 @@ static int wait_for_cmd_complete(nic_t * sp)
2852 * void. 2894 * void.
2853 */ 2895 */
2854 2896
2855void s2io_reset(nic_t * sp) 2897static void s2io_reset(nic_t * sp)
2856{ 2898{
2857 XENA_dev_config_t __iomem *bar0 = sp->bar0; 2899 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2858 u64 val64; 2900 u64 val64;
@@ -2940,7 +2982,7 @@ void s2io_reset(nic_t * sp)
2940 * SUCCESS on success and FAILURE on failure. 2982 * SUCCESS on success and FAILURE on failure.
2941 */ 2983 */
2942 2984
2943int s2io_set_swapper(nic_t * sp) 2985static int s2io_set_swapper(nic_t * sp)
2944{ 2986{
2945 struct net_device *dev = sp->dev; 2987 struct net_device *dev = sp->dev;
2946 XENA_dev_config_t __iomem *bar0 = sp->bar0; 2988 XENA_dev_config_t __iomem *bar0 = sp->bar0;
@@ -3089,7 +3131,7 @@ static int wait_for_msix_trans(nic_t *nic, int i)
3089 return ret; 3131 return ret;
3090} 3132}
3091 3133
3092void restore_xmsi_data(nic_t *nic) 3134static void restore_xmsi_data(nic_t *nic)
3093{ 3135{
3094 XENA_dev_config_t __iomem *bar0 = nic->bar0; 3136 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3095 u64 val64; 3137 u64 val64;
@@ -3180,7 +3222,7 @@ int s2io_enable_msi(nic_t *nic)
3180 return 0; 3222 return 0;
3181} 3223}
3182 3224
3183int s2io_enable_msi_x(nic_t *nic) 3225static int s2io_enable_msi_x(nic_t *nic)
3184{ 3226{
3185 XENA_dev_config_t __iomem *bar0 = nic->bar0; 3227 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3186 u64 tx_mat, rx_mat; 3228 u64 tx_mat, rx_mat;
@@ -3668,23 +3710,32 @@ s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs)
3668 * else schedule a tasklet to reallocate the buffers. 3710 * else schedule a tasklet to reallocate the buffers.
3669 */ 3711 */
3670 for (i = 0; i < config->rx_ring_num; i++) { 3712 for (i = 0; i < config->rx_ring_num; i++) {
3671 int rxb_size = atomic_read(&sp->rx_bufs_left[i]); 3713 if (!sp->lro) {
3672 int level = rx_buffer_level(sp, rxb_size, i); 3714 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
3673 3715 int level = rx_buffer_level(sp, rxb_size, i);
3674 if ((level == PANIC) && (!TASKLET_IN_USE)) { 3716
3675 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name); 3717 if ((level == PANIC) && (!TASKLET_IN_USE)) {
3676 DBG_PRINT(INTR_DBG, "PANIC levels\n"); 3718 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ",
3677 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) { 3719 dev->name);
3678 DBG_PRINT(ERR_DBG, "%s:Out of memory", 3720 DBG_PRINT(INTR_DBG, "PANIC levels\n");
3679 dev->name); 3721 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
3680 DBG_PRINT(ERR_DBG, " in ISR!!\n"); 3722 DBG_PRINT(ERR_DBG, "%s:Out of memory",
3723 dev->name);
3724 DBG_PRINT(ERR_DBG, " in ISR!!\n");
3725 clear_bit(0, (&sp->tasklet_status));
3726 atomic_dec(&sp->isr_cnt);
3727 return IRQ_HANDLED;
3728 }
3681 clear_bit(0, (&sp->tasklet_status)); 3729 clear_bit(0, (&sp->tasklet_status));
3682 atomic_dec(&sp->isr_cnt); 3730 } else if (level == LOW) {
3683 return IRQ_HANDLED; 3731 tasklet_schedule(&sp->task);
3684 } 3732 }
3685 clear_bit(0, (&sp->tasklet_status)); 3733 }
3686 } else if (level == LOW) { 3734 else if (fill_rx_buffers(sp, i) == -ENOMEM) {
3687 tasklet_schedule(&sp->task); 3735 DBG_PRINT(ERR_DBG, "%s:Out of memory",
3736 dev->name);
3737 DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
3738 break;
3688 } 3739 }
3689 } 3740 }
3690 3741
@@ -3697,29 +3748,37 @@ s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs)
3697{ 3748{
3698 ring_info_t *ring = (ring_info_t *)dev_id; 3749 ring_info_t *ring = (ring_info_t *)dev_id;
3699 nic_t *sp = ring->nic; 3750 nic_t *sp = ring->nic;
3751 struct net_device *dev = (struct net_device *) dev_id;
3700 int rxb_size, level, rng_n; 3752 int rxb_size, level, rng_n;
3701 3753
3702 atomic_inc(&sp->isr_cnt); 3754 atomic_inc(&sp->isr_cnt);
3703 rx_intr_handler(ring); 3755 rx_intr_handler(ring);
3704 3756
3705 rng_n = ring->ring_no; 3757 rng_n = ring->ring_no;
3706 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]); 3758 if (!sp->lro) {
3707 level = rx_buffer_level(sp, rxb_size, rng_n); 3759 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
3708 3760 level = rx_buffer_level(sp, rxb_size, rng_n);
3709 if ((level == PANIC) && (!TASKLET_IN_USE)) { 3761
3710 int ret; 3762 if ((level == PANIC) && (!TASKLET_IN_USE)) {
3711 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__); 3763 int ret;
3712 DBG_PRINT(INTR_DBG, "PANIC levels\n"); 3764 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
3713 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) { 3765 DBG_PRINT(INTR_DBG, "PANIC levels\n");
3714 DBG_PRINT(ERR_DBG, "Out of memory in %s", 3766 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
3715 __FUNCTION__); 3767 DBG_PRINT(ERR_DBG, "Out of memory in %s",
3768 __FUNCTION__);
3769 clear_bit(0, (&sp->tasklet_status));
3770 return IRQ_HANDLED;
3771 }
3716 clear_bit(0, (&sp->tasklet_status)); 3772 clear_bit(0, (&sp->tasklet_status));
3717 return IRQ_HANDLED; 3773 } else if (level == LOW) {
3774 tasklet_schedule(&sp->task);
3718 } 3775 }
3719 clear_bit(0, (&sp->tasklet_status));
3720 } else if (level == LOW) {
3721 tasklet_schedule(&sp->task);
3722 } 3776 }
3777 else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
3778 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
3779 DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
3780 }
3781
3723 atomic_dec(&sp->isr_cnt); 3782 atomic_dec(&sp->isr_cnt);
3724 3783
3725 return IRQ_HANDLED; 3784 return IRQ_HANDLED;
@@ -3875,24 +3934,33 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
3875 */ 3934 */
3876#ifndef CONFIG_S2IO_NAPI 3935#ifndef CONFIG_S2IO_NAPI
3877 for (i = 0; i < config->rx_ring_num; i++) { 3936 for (i = 0; i < config->rx_ring_num; i++) {
3878 int ret; 3937 if (!sp->lro) {
3879 int rxb_size = atomic_read(&sp->rx_bufs_left[i]); 3938 int ret;
3880 int level = rx_buffer_level(sp, rxb_size, i); 3939 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
3881 3940 int level = rx_buffer_level(sp, rxb_size, i);
3882 if ((level == PANIC) && (!TASKLET_IN_USE)) { 3941
3883 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name); 3942 if ((level == PANIC) && (!TASKLET_IN_USE)) {
3884 DBG_PRINT(INTR_DBG, "PANIC levels\n"); 3943 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ",
3885 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) { 3944 dev->name);
3886 DBG_PRINT(ERR_DBG, "%s:Out of memory", 3945 DBG_PRINT(INTR_DBG, "PANIC levels\n");
3887 dev->name); 3946 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
3888 DBG_PRINT(ERR_DBG, " in ISR!!\n"); 3947 DBG_PRINT(ERR_DBG, "%s:Out of memory",
3948 dev->name);
3949 DBG_PRINT(ERR_DBG, " in ISR!!\n");
3950 clear_bit(0, (&sp->tasklet_status));
3951 atomic_dec(&sp->isr_cnt);
3952 return IRQ_HANDLED;
3953 }
3889 clear_bit(0, (&sp->tasklet_status)); 3954 clear_bit(0, (&sp->tasklet_status));
3890 atomic_dec(&sp->isr_cnt); 3955 } else if (level == LOW) {
3891 return IRQ_HANDLED; 3956 tasklet_schedule(&sp->task);
3892 } 3957 }
3893 clear_bit(0, (&sp->tasklet_status)); 3958 }
3894 } else if (level == LOW) { 3959 else if (fill_rx_buffers(sp, i) == -ENOMEM) {
3895 tasklet_schedule(&sp->task); 3960 DBG_PRINT(ERR_DBG, "%s:Out of memory",
3961 dev->name);
3962 DBG_PRINT(ERR_DBG, " in Rx intr!!\n");
3963 break;
3896 } 3964 }
3897 } 3965 }
3898#endif 3966#endif
@@ -4129,7 +4197,7 @@ static void s2io_set_multicast(struct net_device *dev)
4129 * as defined in errno.h file on failure. 4197 * as defined in errno.h file on failure.
4130 */ 4198 */
4131 4199
4132int s2io_set_mac_addr(struct net_device *dev, u8 * addr) 4200static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4133{ 4201{
4134 nic_t *sp = dev->priv; 4202 nic_t *sp = dev->priv;
4135 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4203 XENA_dev_config_t __iomem *bar0 = sp->bar0;
@@ -5044,6 +5112,7 @@ static void s2io_get_ethtool_stats(struct net_device *dev,
5044 int i = 0; 5112 int i = 0;
5045 nic_t *sp = dev->priv; 5113 nic_t *sp = dev->priv;
5046 StatInfo_t *stat_info = sp->mac_control.stats_info; 5114 StatInfo_t *stat_info = sp->mac_control.stats_info;
5115 u64 tmp;
5047 5116
5048 s2io_updt_stats(sp); 5117 s2io_updt_stats(sp);
5049 tmp_stats[i++] = 5118 tmp_stats[i++] =
@@ -5135,6 +5204,16 @@ static void s2io_get_ethtool_stats(struct net_device *dev,
5135 tmp_stats[i++] = 0; 5204 tmp_stats[i++] = 0;
5136 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs; 5205 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
5137 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs; 5206 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
5207 tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
5208 tmp_stats[i++] = stat_info->sw_stat.sending_both;
5209 tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
5210 tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
5211 tmp = 0;
5212 if (stat_info->sw_stat.num_aggregations) {
5213 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
5214 do_div(tmp, stat_info->sw_stat.num_aggregations);
5215 }
5216 tmp_stats[i++] = tmp;
5138} 5217}
5139 5218
5140static int s2io_ethtool_get_regs_len(struct net_device *dev) 5219static int s2io_ethtool_get_regs_len(struct net_device *dev)
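The new ethtool counter appended in the hunk above is the mean number of frames clubbed per LRO aggregation: sum_avg_pkts_aggregated divided by num_aggregations via do_div(), guarded against a zero divisor. A minimal user-space sketch of the same arithmetic, with illustrative names rather than the driver's, is:

#include <stdio.h>

/* Same shape as the ethtool stat: report 0 when nothing has been
 * aggregated yet instead of dividing by zero. */
static unsigned long long avg_pkts_aggregated(unsigned long long sum,
					      unsigned long long count)
{
	return count ? sum / count : 0;
}

int main(void)
{
	printf("%llu\n", avg_pkts_aggregated(0, 0));    /* no sessions yet */
	printf("%llu\n", avg_pkts_aggregated(120, 16)); /* ~7 frames per session */
	return 0;
}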
@@ -5516,6 +5595,14 @@ static int s2io_card_up(nic_t * sp)
5516 /* Setting its receive mode */ 5595 /* Setting its receive mode */
5517 s2io_set_multicast(dev); 5596 s2io_set_multicast(dev);
5518 5597
5598 if (sp->lro) {
5599 /* Initialize max aggregatable pkts based on MTU */
5600 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
5601 /* Check if we can use(if specified) user provided value */
5602 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
5603 sp->lro_max_aggr_per_sess = lro_max_pkts;
5604 }
5605
5519 /* Enable tasklet for the device */ 5606 /* Enable tasklet for the device */
5520 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev); 5607 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
5521 5608
@@ -5608,6 +5695,7 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
5608 ((unsigned long) rxdp->Host_Control); 5695 ((unsigned long) rxdp->Host_Control);
5609 int ring_no = ring_data->ring_no; 5696 int ring_no = ring_data->ring_no;
5610 u16 l3_csum, l4_csum; 5697 u16 l3_csum, l4_csum;
5698 lro_t *lro;
5611 5699
5612 skb->dev = dev; 5700 skb->dev = dev;
5613 if (rxdp->Control_1 & RXD_T_CODE) { 5701 if (rxdp->Control_1 & RXD_T_CODE) {
@@ -5656,7 +5744,8 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
5656 skb_put(skb, buf2_len); 5744 skb_put(skb, buf2_len);
5657 } 5745 }
5658 5746
5659 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && 5747 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
5748 (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
5660 (sp->rx_csum)) { 5749 (sp->rx_csum)) {
5661 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1); 5750 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
5662 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1); 5751 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
@@ -5667,6 +5756,54 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
5667 * a flag in the RxD. 5756 * a flag in the RxD.
5668 */ 5757 */
5669 skb->ip_summed = CHECKSUM_UNNECESSARY; 5758 skb->ip_summed = CHECKSUM_UNNECESSARY;
5759 if (sp->lro) {
5760 u32 tcp_len;
5761 u8 *tcp;
5762 int ret = 0;
5763
5764 ret = s2io_club_tcp_session(skb->data, &tcp,
5765 &tcp_len, &lro, rxdp, sp);
5766 switch (ret) {
5767 case 3: /* Begin anew */
5768 lro->parent = skb;
5769 goto aggregate;
5770 case 1: /* Aggregate */
5771 {
5772 lro_append_pkt(sp, lro,
5773 skb, tcp_len);
5774 goto aggregate;
5775 }
5776 case 4: /* Flush session */
5777 {
5778 lro_append_pkt(sp, lro,
5779 skb, tcp_len);
5780 queue_rx_frame(lro->parent);
5781 clear_lro_session(lro);
5782 sp->mac_control.stats_info->
5783 sw_stat.flush_max_pkts++;
5784 goto aggregate;
5785 }
5786 case 2: /* Flush both */
5787 lro->parent->data_len =
5788 lro->frags_len;
5789 sp->mac_control.stats_info->
5790 sw_stat.sending_both++;
5791 queue_rx_frame(lro->parent);
5792 clear_lro_session(lro);
5793 goto send_up;
5794 case 0: /* sessions exceeded */
5795 case 5: /*
5796 * First pkt in session not
5797 * L3/L4 aggregatable
5798 */
5799 break;
5800 default:
5801 DBG_PRINT(ERR_DBG,
5802 "%s: Samadhana!!\n",
5803 __FUNCTION__);
5804 BUG();
5805 }
5806 }
5670 } else { 5807 } else {
5671 /* 5808 /*
5672 * Packet with erroneous checksum, let the 5809 * Packet with erroneous checksum, let the
@@ -5678,25 +5815,31 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
5678 skb->ip_summed = CHECKSUM_NONE; 5815 skb->ip_summed = CHECKSUM_NONE;
5679 } 5816 }
5680 5817
5681 skb->protocol = eth_type_trans(skb, dev); 5818 if (!sp->lro) {
5819 skb->protocol = eth_type_trans(skb, dev);
5682#ifdef CONFIG_S2IO_NAPI 5820#ifdef CONFIG_S2IO_NAPI
5683 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) { 5821 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
5684 /* Queueing the vlan frame to the upper layer */ 5822 /* Queueing the vlan frame to the upper layer */
5685 vlan_hwaccel_receive_skb(skb, sp->vlgrp, 5823 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
5686 RXD_GET_VLAN_TAG(rxdp->Control_2)); 5824 RXD_GET_VLAN_TAG(rxdp->Control_2));
5687 } else { 5825 } else {
5688 netif_receive_skb(skb); 5826 netif_receive_skb(skb);
5689 } 5827 }
5690#else 5828#else
5691 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) { 5829 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
5692 /* Queueing the vlan frame to the upper layer */ 5830 /* Queueing the vlan frame to the upper layer */
5693 vlan_hwaccel_rx(skb, sp->vlgrp, 5831 vlan_hwaccel_rx(skb, sp->vlgrp,
5694 RXD_GET_VLAN_TAG(rxdp->Control_2)); 5832 RXD_GET_VLAN_TAG(rxdp->Control_2));
5695 } else { 5833 } else {
5696 netif_rx(skb); 5834 netif_rx(skb);
5697 } 5835 }
5698#endif 5836#endif
5837 } else {
5838send_up:
5839 queue_rx_frame(skb);
5840 }
5699 dev->last_rx = jiffies; 5841 dev->last_rx = jiffies;
5842aggregate:
5700 atomic_dec(&sp->rx_bufs_left[ring_no]); 5843 atomic_dec(&sp->rx_bufs_left[ring_no]);
5701 return SUCCESS; 5844 return SUCCESS;
5702} 5845}
@@ -5714,7 +5857,7 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
5714 * void. 5857 * void.
5715 */ 5858 */
5716 5859
5717void s2io_link(nic_t * sp, int link) 5860static void s2io_link(nic_t * sp, int link)
5718{ 5861{
5719 struct net_device *dev = (struct net_device *) sp->dev; 5862 struct net_device *dev = (struct net_device *) sp->dev;
5720 5863
@@ -5739,7 +5882,7 @@ void s2io_link(nic_t * sp, int link)
5739 * returns the revision ID of the device. 5882 * returns the revision ID of the device.
5740 */ 5883 */
5741 5884
5742int get_xena_rev_id(struct pci_dev *pdev) 5885static int get_xena_rev_id(struct pci_dev *pdev)
5743{ 5886{
5744 u8 id = 0; 5887 u8 id = 0;
5745 int ret; 5888 int ret;
@@ -5808,6 +5951,8 @@ module_param(indicate_max_pkts, int, 0);
5808#endif 5951#endif
5809module_param(rxsync_frequency, int, 0); 5952module_param(rxsync_frequency, int, 0);
5810module_param(intr_type, int, 0); 5953module_param(intr_type, int, 0);
5954module_param(lro, int, 0);
5955module_param(lro_max_pkts, int, 0);
5811 5956
5812/** 5957/**
5813 * s2io_init_nic - Initialization of the adapter . 5958 * s2io_init_nic - Initialization of the adapter .
@@ -5939,6 +6084,7 @@ Defaulting to INTA\n");
5939 else 6084 else
5940 sp->device_type = XFRAME_I_DEVICE; 6085 sp->device_type = XFRAME_I_DEVICE;
5941 6086
6087 sp->lro = lro;
5942 6088
5943 /* Initialize some PCI/PCI-X fields of the NIC. */ 6089 /* Initialize some PCI/PCI-X fields of the NIC. */
5944 s2io_init_pci(sp); 6090 s2io_init_pci(sp);
@@ -6242,6 +6388,10 @@ Defaulting to INTA\n");
6242 DBG_PRINT(ERR_DBG, "%s: 3-Buffer mode support has been " 6388 DBG_PRINT(ERR_DBG, "%s: 3-Buffer mode support has been "
6243 "enabled\n",dev->name); 6389 "enabled\n",dev->name);
6244 6390
6391 if (sp->lro)
6392 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
6393 dev->name);
6394
6245 /* Initialize device name */ 6395 /* Initialize device name */
6246 strcpy(sp->name, dev->name); 6396 strcpy(sp->name, dev->name);
6247 if (sp->device_type & XFRAME_II_DEVICE) 6397 if (sp->device_type & XFRAME_II_DEVICE)
@@ -6344,7 +6494,7 @@ int __init s2io_starter(void)
6344 * Description: This function is the cleanup routine for the driver. It unregist * ers the driver. 6494 * Description: This function is the cleanup routine for the driver. It unregist * ers the driver.
6345 */ 6495 */
6346 6496
6347void s2io_closer(void) 6497static void s2io_closer(void)
6348{ 6498{
6349 pci_unregister_driver(&s2io_driver); 6499 pci_unregister_driver(&s2io_driver);
6350 DBG_PRINT(INIT_DBG, "cleanup done\n"); 6500 DBG_PRINT(INIT_DBG, "cleanup done\n");
@@ -6352,3 +6502,318 @@ void s2io_closer(void)
6352 6502
6353module_init(s2io_starter); 6503module_init(s2io_starter);
6354module_exit(s2io_closer); 6504module_exit(s2io_closer);
6505
6506static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
6507 struct tcphdr **tcp, RxD_t *rxdp)
6508{
6509 int ip_off;
6510 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
6511
6512 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
6513 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
6514 __FUNCTION__);
6515 return -1;
6516 }
6517
6518 /* TODO:
6519 * By default the VLAN field in the MAC is stripped by the card, if this
6520 * feature is turned off in rx_pa_cfg register, then the ip_off field
6521 * has to be shifted by a further 2 bytes
6522 */
6523 switch (l2_type) {
6524 case 0: /* DIX type */
6525 case 4: /* DIX type with VLAN */
6526 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
6527 break;
6528 /* LLC, SNAP etc are considered non-mergeable */
6529 default:
6530 return -1;
6531 }
6532
6533 *ip = (struct iphdr *)((u8 *)buffer + ip_off);
6534 ip_len = (u8)((*ip)->ihl);
6535 ip_len <<= 2;
6536 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
6537
6538 return 0;
6539}
6540
6541static int check_for_socket_match(lro_t *lro, struct iphdr *ip,
6542 struct tcphdr *tcp)
6543{
6544 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
6545 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
6546 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
6547 return -1;
6548 return 0;
6549}
6550
6551static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
6552{
6553 return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
6554}
6555
6556static void initiate_new_session(lro_t *lro, u8 *l2h,
6557 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
6558{
6559 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
6560 lro->l2h = l2h;
6561 lro->iph = ip;
6562 lro->tcph = tcp;
6563 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
6564 lro->tcp_ack = ntohl(tcp->ack_seq);
6565 lro->sg_num = 1;
6566 lro->total_len = ntohs(ip->tot_len);
6567 lro->frags_len = 0;
6568 /*
6569 * check if we saw TCP timestamp. Other consistency checks have
6570 * already been done.
6571 */
6572 if (tcp->doff == 8) {
6573 u32 *ptr;
6574 ptr = (u32 *)(tcp+1);
6575 lro->saw_ts = 1;
6576 lro->cur_tsval = *(ptr+1);
6577 lro->cur_tsecr = *(ptr+2);
6578 }
6579 lro->in_use = 1;
6580}
6581
6582static void update_L3L4_header(nic_t *sp, lro_t *lro)
6583{
6584 struct iphdr *ip = lro->iph;
6585 struct tcphdr *tcp = lro->tcph;
6586 u16 nchk;
6587 StatInfo_t *statinfo = sp->mac_control.stats_info;
6588 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
6589
6590 /* Update L3 header */
6591 ip->tot_len = htons(lro->total_len);
6592 ip->check = 0;
6593 nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
6594 ip->check = nchk;
6595
6596 /* Update L4 header */
6597 tcp->ack_seq = lro->tcp_ack;
6598 tcp->window = lro->window;
6599
6600 /* Update tsecr field if this session has timestamps enabled */
6601 if (lro->saw_ts) {
6602 u32 *ptr = (u32 *)(tcp + 1);
6603 *(ptr+2) = lro->cur_tsecr;
6604 }
6605
6606 /* Update counters required for calculation of
6607 * average no. of packets aggregated.
6608 */
6609 statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
6610 statinfo->sw_stat.num_aggregations++;
6611}
6612
6613static void aggregate_new_rx(lro_t *lro, struct iphdr *ip,
6614 struct tcphdr *tcp, u32 l4_pyld)
6615{
6616 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
6617 lro->total_len += l4_pyld;
6618 lro->frags_len += l4_pyld;
6619 lro->tcp_next_seq += l4_pyld;
6620 lro->sg_num++;
6621
6622 /* Update ack seq no. and window ad(from this pkt) in LRO object */
6623 lro->tcp_ack = tcp->ack_seq;
6624 lro->window = tcp->window;
6625
6626 if (lro->saw_ts) {
6627 u32 *ptr;
6628 /* Update tsecr and tsval from this packet */
6629 ptr = (u32 *) (tcp + 1);
6630 lro->cur_tsval = *(ptr + 1);
6631 lro->cur_tsecr = *(ptr + 2);
6632 }
6633}
6634
6635static int verify_l3_l4_lro_capable(lro_t *l_lro, struct iphdr *ip,
6636 struct tcphdr *tcp, u32 tcp_pyld_len)
6637{
6638 u8 *ptr;
6639
6640 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
6641
6642 if (!tcp_pyld_len) {
6643 /* Runt frame or a pure ack */
6644 return -1;
6645 }
6646
6647 if (ip->ihl != 5) /* IP has options */
6648 return -1;
6649
6650 if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
6651 !tcp->ack) {
6652 /*
6653 * Currently recognize only the ack control word and
6654 * any other control field being set would result in
6655 * flushing the LRO session
6656 */
6657 return -1;
6658 }
6659
6660 /*
6661 * Allow only one TCP timestamp option. Don't aggregate if
6662 * any other options are detected.
6663 */
6664 if (tcp->doff != 5 && tcp->doff != 8)
6665 return -1;
6666
6667 if (tcp->doff == 8) {
6668 ptr = (u8 *)(tcp + 1);
6669 while (*ptr == TCPOPT_NOP)
6670 ptr++;
6671 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
6672 return -1;
6673
6674 /* Ensure timestamp value increases monotonically */
6675 if (l_lro)
6676 if (l_lro->cur_tsval > *((u32 *)(ptr+2)))
6677 return -1;
6678
6679 /* timestamp echo reply should be non-zero */
6680 if (*((u32 *)(ptr+6)) == 0)
6681 return -1;
6682 }
6683
6684 return 0;
6685}
6686
6687static int
6688s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, lro_t **lro,
6689 RxD_t *rxdp, nic_t *sp)
6690{
6691 struct iphdr *ip;
6692 struct tcphdr *tcph;
6693 int ret = 0, i;
6694
6695 if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
6696 rxdp))) {
6697 DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
6698 ip->saddr, ip->daddr);
6699 } else {
6700 return ret;
6701 }
6702
6703 tcph = (struct tcphdr *)*tcp;
6704 *tcp_len = get_l4_pyld_length(ip, tcph);
6705 for (i=0; i<MAX_LRO_SESSIONS; i++) {
6706 lro_t *l_lro = &sp->lro0_n[i];
6707 if (l_lro->in_use) {
6708 if (check_for_socket_match(l_lro, ip, tcph))
6709 continue;
6710 /* Sock pair matched */
6711 *lro = l_lro;
6712
6713 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
6714 DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
6715 "0x%x, actual 0x%x\n", __FUNCTION__,
6716 (*lro)->tcp_next_seq,
6717 ntohl(tcph->seq));
6718
6719 sp->mac_control.stats_info->
6720 sw_stat.outof_sequence_pkts++;
6721 ret = 2;
6722 break;
6723 }
6724
6725 if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
6726 ret = 1; /* Aggregate */
6727 else
6728 ret = 2; /* Flush both */
6729 break;
6730 }
6731 }
6732
6733 if (ret == 0) {
6734 /* Before searching for available LRO objects,
6735 * check if the pkt is L3/L4 aggregatable. If not
6736 * don't create new LRO session. Just send this
6737 * packet up.
6738 */
6739 if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
6740 return 5;
6741 }
6742
6743 for (i=0; i<MAX_LRO_SESSIONS; i++) {
6744 lro_t *l_lro = &sp->lro0_n[i];
6745 if (!(l_lro->in_use)) {
6746 *lro = l_lro;
6747 ret = 3; /* Begin anew */
6748 break;
6749 }
6750 }
6751 }
6752
6753 if (ret == 0) { /* sessions exceeded */
6754 DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
6755 __FUNCTION__);
6756 *lro = NULL;
6757 return ret;
6758 }
6759
6760 switch (ret) {
6761 case 3:
6762 initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
6763 break;
6764 case 2:
6765 update_L3L4_header(sp, *lro);
6766 break;
6767 case 1:
6768 aggregate_new_rx(*lro, ip, tcph, *tcp_len);
6769 if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
6770 update_L3L4_header(sp, *lro);
6771 ret = 4; /* Flush the LRO */
6772 }
6773 break;
6774 default:
6775 DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
6776 __FUNCTION__);
6777 break;
6778 }
6779
6780 return ret;
6781}
6782
6783static void clear_lro_session(lro_t *lro)
6784{
6785 static u16 lro_struct_size = sizeof(lro_t);
6786
6787 memset(lro, 0, lro_struct_size);
6788}
6789
6790static void queue_rx_frame(struct sk_buff *skb)
6791{
6792 struct net_device *dev = skb->dev;
6793
6794 skb->protocol = eth_type_trans(skb, dev);
6795#ifdef CONFIG_S2IO_NAPI
6796 netif_receive_skb(skb);
6797#else
6798 netif_rx(skb);
6799#endif
6800}
6801
6802static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb,
6803 u32 tcp_len)
6804{
6805 struct sk_buff *tmp, *first = lro->parent;
6806
6807 first->len += tcp_len;
6808 first->data_len = lro->frags_len;
6809 skb_pull(skb, (skb->len - tcp_len));
6810 if ((tmp = skb_shinfo(first)->frag_list)) {
6811 while (tmp->next)
6812 tmp = tmp->next;
6813 tmp->next = skb;
6814 }
6815 else
6816 skb_shinfo(first)->frag_list = skb;
6817 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
6818 return;
6819}
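The LRO receive path added above hinges on the integer verdict returned by s2io_club_tcp_session(): 0 (no free session slot), 1 (aggregate into an open session), 2 (flush both the session and the current frame), 3 (start a new session with this frame as parent), 4 (flush after the per-session limit) and 5 (first packet not L3/L4 aggregatable). A minimal user-space sketch of how rx_osm_handler() dispatches on that verdict, using hypothetical stand-in helpers instead of the driver's real ones, looks like:

#include <stdio.h>

/* Verdicts mirrored from the driver's s2io_club_tcp_session() */
enum lro_verdict {
	LRO_SESSIONS_FULL = 0,	/* no free session slot */
	LRO_AGGREGATE     = 1,	/* append payload to an open session */
	LRO_FLUSH_BOTH    = 2,	/* hand up the session, then this frame */
	LRO_BEGIN_NEW     = 3,	/* frame becomes parent of a new session */
	LRO_FLUSH_SESSION = 4,	/* session hit max aggregation, flush it */
	LRO_NOT_CAPABLE   = 5,	/* first frame not L3/L4 aggregatable */
};

/* Hypothetical stand-ins for lro_append_pkt(), queue_rx_frame() etc. */
static void append_to_session(int v) { printf("append (%d)\n", v); }
static void send_parent_up(int v)    { printf("send parent up (%d)\n", v); }
static void send_frame_up(int v)     { printf("send frame up (%d)\n", v); }
static void start_session(int v)     { printf("start session (%d)\n", v); }

static void handle_verdict(enum lro_verdict v)
{
	switch (v) {
	case LRO_BEGIN_NEW:
		start_session(v);		/* keep skb as lro->parent */
		break;
	case LRO_AGGREGATE:
		append_to_session(v);
		break;
	case LRO_FLUSH_SESSION:
		append_to_session(v);
		send_parent_up(v);		/* session reached max size */
		break;
	case LRO_FLUSH_BOTH:
		send_parent_up(v);
		send_frame_up(v);		/* out of order, flush both */
		break;
	case LRO_SESSIONS_FULL:
	case LRO_NOT_CAPABLE:
		send_frame_up(v);		/* fall back to normal rx path */
		break;
	}
}

int main(void)
{
	enum lro_verdict trace[] = {
		LRO_BEGIN_NEW, LRO_AGGREGATE, LRO_FLUSH_SESSION, LRO_NOT_CAPABLE
	};
	for (unsigned i = 0; i < sizeof(trace) / sizeof(trace[0]); i++)
		handle_verdict(trace[i]);
	return 0;
}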
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 852a6a899d07..0a0b5b29d81e 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -64,7 +64,7 @@ typedef enum xena_max_outstanding_splits {
64#define INTR_DBG 4 64#define INTR_DBG 4
65 65
66/* Global variable that defines the present debug level of the driver. */ 66/* Global variable that defines the present debug level of the driver. */
67int debug_level = ERR_DBG; /* Default level. */ 67static int debug_level = ERR_DBG;
68 68
69/* DEBUG message print. */ 69/* DEBUG message print. */
70#define DBG_PRINT(dbg_level, args...) if(!(debug_level<dbg_level)) printk(args) 70#define DBG_PRINT(dbg_level, args...) if(!(debug_level<dbg_level)) printk(args)
@@ -78,6 +78,13 @@ int debug_level = ERR_DBG; /* Default level. */
78typedef struct { 78typedef struct {
79 unsigned long long single_ecc_errs; 79 unsigned long long single_ecc_errs;
80 unsigned long long double_ecc_errs; 80 unsigned long long double_ecc_errs;
81 /* LRO statistics */
82 unsigned long long clubbed_frms_cnt;
83 unsigned long long sending_both;
84 unsigned long long outof_sequence_pkts;
85 unsigned long long flush_max_pkts;
86 unsigned long long sum_avg_pkts_aggregated;
87 unsigned long long num_aggregations;
81} swStat_t; 88} swStat_t;
82 89
83/* The statistics block of Xena */ 90/* The statistics block of Xena */
@@ -268,7 +275,7 @@ typedef struct stat_block {
268#define MAX_RX_RINGS 8 275#define MAX_RX_RINGS 8
269 276
270/* FIFO mappings for all possible number of fifos configured */ 277/* FIFO mappings for all possible number of fifos configured */
271int fifo_map[][MAX_TX_FIFOS] = { 278static int fifo_map[][MAX_TX_FIFOS] = {
272 {0, 0, 0, 0, 0, 0, 0, 0}, 279 {0, 0, 0, 0, 0, 0, 0, 0},
273 {0, 0, 0, 0, 1, 1, 1, 1}, 280 {0, 0, 0, 0, 1, 1, 1, 1},
274 {0, 0, 0, 1, 1, 1, 2, 2}, 281 {0, 0, 0, 1, 1, 1, 2, 2},
@@ -680,6 +687,24 @@ struct msix_info_st {
680 u64 data; 687 u64 data;
681}; 688};
682 689
690/* Data structure to represent a LRO session */
691typedef struct lro {
692 struct sk_buff *parent;
693 u8 *l2h;
694 struct iphdr *iph;
695 struct tcphdr *tcph;
696 u32 tcp_next_seq;
697 u32 tcp_ack;
698 int total_len;
699 int frags_len;
700 int sg_num;
701 int in_use;
702 u16 window;
703 u32 cur_tsval;
704 u32 cur_tsecr;
705 u8 saw_ts;
706}lro_t;
707
683/* Structure representing one instance of the NIC */ 708/* Structure representing one instance of the NIC */
684struct s2io_nic { 709struct s2io_nic {
685 int rxd_mode; 710 int rxd_mode;
@@ -784,6 +809,13 @@ struct s2io_nic {
784#define XFRAME_II_DEVICE 2 809#define XFRAME_II_DEVICE 2
785 u8 device_type; 810 u8 device_type;
786 811
812#define MAX_LRO_SESSIONS 32
813 lro_t lro0_n[MAX_LRO_SESSIONS];
814 unsigned long clubbed_frms_cnt;
815 unsigned long sending_both;
816 u8 lro;
817 u16 lro_max_aggr_per_sess;
818
787#define INTA 0 819#define INTA 0
788#define MSI 1 820#define MSI 1
789#define MSI_X 2 821#define MSI_X 2
@@ -911,18 +943,16 @@ static void tx_intr_handler(fifo_info_t *fifo_data);
911static void alarm_intr_handler(struct s2io_nic *sp); 943static void alarm_intr_handler(struct s2io_nic *sp);
912 944
913static int s2io_starter(void); 945static int s2io_starter(void);
914void s2io_closer(void);
915static void s2io_tx_watchdog(struct net_device *dev); 946static void s2io_tx_watchdog(struct net_device *dev);
916static void s2io_tasklet(unsigned long dev_addr); 947static void s2io_tasklet(unsigned long dev_addr);
917static void s2io_set_multicast(struct net_device *dev); 948static void s2io_set_multicast(struct net_device *dev);
918static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp); 949static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp);
919void s2io_link(nic_t * sp, int link); 950static void s2io_link(nic_t * sp, int link);
920void s2io_reset(nic_t * sp);
921#if defined(CONFIG_S2IO_NAPI) 951#if defined(CONFIG_S2IO_NAPI)
922static int s2io_poll(struct net_device *dev, int *budget); 952static int s2io_poll(struct net_device *dev, int *budget);
923#endif 953#endif
924static void s2io_init_pci(nic_t * sp); 954static void s2io_init_pci(nic_t * sp);
925int s2io_set_mac_addr(struct net_device *dev, u8 * addr); 955static int s2io_set_mac_addr(struct net_device *dev, u8 * addr);
926static void s2io_alarm_handle(unsigned long data); 956static void s2io_alarm_handle(unsigned long data);
927static int s2io_enable_msi(nic_t *nic); 957static int s2io_enable_msi(nic_t *nic);
928static irqreturn_t s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs); 958static irqreturn_t s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs);
@@ -930,14 +960,19 @@ static irqreturn_t
930s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs); 960s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs);
931static irqreturn_t 961static irqreturn_t
932s2io_msix_fifo_handle(int irq, void *dev_id, struct pt_regs *regs); 962s2io_msix_fifo_handle(int irq, void *dev_id, struct pt_regs *regs);
933int s2io_enable_msi_x(nic_t *nic);
934static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs); 963static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs);
935static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag); 964static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag);
936static struct ethtool_ops netdev_ethtool_ops; 965static struct ethtool_ops netdev_ethtool_ops;
937static void s2io_set_link(unsigned long data); 966static void s2io_set_link(unsigned long data);
938int s2io_set_swapper(nic_t * sp); 967static int s2io_set_swapper(nic_t * sp);
939static void s2io_card_down(nic_t *nic); 968static void s2io_card_down(nic_t *nic);
940static int s2io_card_up(nic_t *nic); 969static int s2io_card_up(nic_t *nic);
941int get_xena_rev_id(struct pci_dev *pdev); 970static int get_xena_rev_id(struct pci_dev *pdev);
942void restore_xmsi_data(nic_t *nic); 971static void restore_xmsi_data(nic_t *nic);
972
973static int s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, lro_t **lro, RxD_t *rxdp, nic_t *sp);
974static void clear_lro_session(lro_t *lro);
975static void queue_rx_frame(struct sk_buff *skb);
976static void update_L3L4_header(nic_t *sp, lro_t *lro);
977static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, u32 tcp_len);
943#endif /* _S2IO_H */ 978#endif /* _S2IO_H */
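One detail from the s2io_card_up() hunk earlier is worth spelling out: the per-session aggregation limit is derived from the MTU so that the rewritten IP total length, a 16-bit field, can never overflow, and the user-supplied lro_max_pkts module parameter may only tighten that bound. A small stand-alone sketch of the same computation, assuming a standard 1500 or jumbo 9000 byte MTU purely for illustration:

#include <stdio.h>

/* Mirror of the driver's limit: at most ((1 << 16) - 1) / MTU segments,
 * so the aggregated IP tot_len still fits in 16 bits; a user-provided
 * cap (lro_max_pkts in the driver) can only lower it further. */
static unsigned int max_aggr_per_session(unsigned int mtu,
					 unsigned int user_cap)
{
	unsigned int limit = ((1u << 16) - 1) / mtu;

	if (user_cap < limit)
		limit = user_cap;
	return limit;
}

int main(void)
{
	printf("mtu 1500, cap 20     -> %u\n", max_aggr_per_session(1500, 20));
	printf("mtu 1500, cap 0xFFFF -> %u\n", max_aggr_per_session(1500, 0xFFFF));
	printf("mtu 9000, cap 0xFFFF -> %u\n", max_aggr_per_session(9000, 0xFFFF));
	return 0;
}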
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index 76139478c3df..66cf226c4ee3 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -59,7 +59,7 @@ static char version[] = "sb1000.c:v1.1.2 6/01/98 (fventuri@mediaone.net)\n";
59#ifdef SB1000_DEBUG 59#ifdef SB1000_DEBUG
60static int sb1000_debug = SB1000_DEBUG; 60static int sb1000_debug = SB1000_DEBUG;
61#else 61#else
62static int sb1000_debug = 1; 62static const int sb1000_debug = 1;
63#endif 63#endif
64 64
65static const int SB1000_IO_EXTENT = 8; 65static const int SB1000_IO_EXTENT = 8;
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index aa4ca1821759..f2be9f83f091 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2001,2002,2003 Broadcom Corporation 2 * Copyright (C) 2001,2002,2003,2004 Broadcom Corporation
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License 5 * modify it under the terms of the GNU General Public License
@@ -43,6 +43,7 @@
43#define SBMAC_ETH0_HWADDR "40:00:00:00:01:00" 43#define SBMAC_ETH0_HWADDR "40:00:00:00:01:00"
44#define SBMAC_ETH1_HWADDR "40:00:00:00:01:01" 44#define SBMAC_ETH1_HWADDR "40:00:00:00:01:01"
45#define SBMAC_ETH2_HWADDR "40:00:00:00:01:02" 45#define SBMAC_ETH2_HWADDR "40:00:00:00:01:02"
46#define SBMAC_ETH3_HWADDR "40:00:00:00:01:03"
46#endif 47#endif
47 48
48 49
@@ -57,7 +58,7 @@ static char version1[] __devinitdata =
57 58
58#define CONFIG_SBMAC_COALESCE 59#define CONFIG_SBMAC_COALESCE
59 60
60#define MAX_UNITS 3 /* More are supported, limit only on options */ 61#define MAX_UNITS 4 /* More are supported, limit only on options */
61 62
62/* Time in jiffies before concluding the transmitter is hung. */ 63/* Time in jiffies before concluding the transmitter is hung. */
63#define TX_TIMEOUT (2*HZ) 64#define TX_TIMEOUT (2*HZ)
@@ -85,11 +86,11 @@ MODULE_PARM_DESC(noisy_mii, "MII status messages");
85 The media type is usually passed in 'options[]'. 86 The media type is usually passed in 'options[]'.
86*/ 87*/
87#ifdef MODULE 88#ifdef MODULE
88static int options[MAX_UNITS] = {-1, -1, -1}; 89static int options[MAX_UNITS] = {-1, -1, -1, -1};
89module_param_array(options, int, NULL, S_IRUGO); 90module_param_array(options, int, NULL, S_IRUGO);
90MODULE_PARM_DESC(options, "1-" __MODULE_STRING(MAX_UNITS)); 91MODULE_PARM_DESC(options, "1-" __MODULE_STRING(MAX_UNITS));
91 92
92static int full_duplex[MAX_UNITS] = {-1, -1, -1}; 93static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1};
93module_param_array(full_duplex, int, NULL, S_IRUGO); 94module_param_array(full_duplex, int, NULL, S_IRUGO);
94MODULE_PARM_DESC(full_duplex, "1-" __MODULE_STRING(MAX_UNITS)); 95MODULE_PARM_DESC(full_duplex, "1-" __MODULE_STRING(MAX_UNITS));
95#endif 96#endif
@@ -105,13 +106,26 @@ MODULE_PARM_DESC(int_timeout, "Timeout value");
105#endif 106#endif
106 107
107#include <asm/sibyte/sb1250.h> 108#include <asm/sibyte/sb1250.h>
108#include <asm/sibyte/sb1250_defs.h> 109#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
110#include <asm/sibyte/bcm1480_regs.h>
111#include <asm/sibyte/bcm1480_int.h>
112#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
109#include <asm/sibyte/sb1250_regs.h> 113#include <asm/sibyte/sb1250_regs.h>
110#include <asm/sibyte/sb1250_mac.h>
111#include <asm/sibyte/sb1250_dma.h>
112#include <asm/sibyte/sb1250_int.h> 114#include <asm/sibyte/sb1250_int.h>
115#else
116#error invalid SiByte MAC configuation
117#endif
113#include <asm/sibyte/sb1250_scd.h> 118#include <asm/sibyte/sb1250_scd.h>
119#include <asm/sibyte/sb1250_mac.h>
120#include <asm/sibyte/sb1250_dma.h>
114 121
122#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
123#define UNIT_INT(n) (K_BCM1480_INT_MAC_0 + ((n) * 2))
124#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
125#define UNIT_INT(n) (K_INT_MAC_0 + (n))
126#else
127#error invalid SiByte MAC configuation
128#endif
115 129
116/********************************************************************** 130/**********************************************************************
117 * Simple types 131 * Simple types
@@ -1476,10 +1490,10 @@ static void sbmac_channel_start(struct sbmac_softc *s)
1476 * and make sure that RD_THRSH + WR_THRSH <=128 for pass2 and above 1490 * and make sure that RD_THRSH + WR_THRSH <=128 for pass2 and above
1477 * Use a larger RD_THRSH for gigabit 1491 * Use a larger RD_THRSH for gigabit
1478 */ 1492 */
1479 if (periph_rev >= 2) 1493 if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2)
1480 th_value = 64;
1481 else
1482 th_value = 28; 1494 th_value = 28;
1495 else
1496 th_value = 64;
1483 1497
1484 fifo = V_MAC_TX_WR_THRSH(4) | /* Must be '4' or '8' */ 1498 fifo = V_MAC_TX_WR_THRSH(4) | /* Must be '4' or '8' */
1485 ((s->sbm_speed == sbmac_speed_1000) 1499 ((s->sbm_speed == sbmac_speed_1000)
@@ -1589,13 +1603,17 @@ static void sbmac_channel_start(struct sbmac_softc *s)
1589 * Turn on the rest of the bits in the enable register 1603 * Turn on the rest of the bits in the enable register
1590 */ 1604 */
1591 1605
1606#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
1607 __raw_writeq(M_MAC_RXDMA_EN0 |
1608 M_MAC_TXDMA_EN0, s->sbm_macenable);
1609#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
1592 __raw_writeq(M_MAC_RXDMA_EN0 | 1610 __raw_writeq(M_MAC_RXDMA_EN0 |
1593 M_MAC_TXDMA_EN0 | 1611 M_MAC_TXDMA_EN0 |
1594 M_MAC_RX_ENABLE | 1612 M_MAC_RX_ENABLE |
1595 M_MAC_TX_ENABLE, s->sbm_macenable); 1613 M_MAC_TX_ENABLE, s->sbm_macenable);
1596 1614#else
1597 1615#error invalid SiByte MAC configuation
1598 1616#endif
1599 1617
1600#ifdef CONFIG_SBMAC_COALESCE 1618#ifdef CONFIG_SBMAC_COALESCE
1601 /* 1619 /*
@@ -1786,11 +1804,12 @@ static void sbmac_set_iphdr_offset(struct sbmac_softc *sc)
1786 reg &= ~M_MAC_IPHDR_OFFSET | V_MAC_IPHDR_OFFSET(15); 1804 reg &= ~M_MAC_IPHDR_OFFSET | V_MAC_IPHDR_OFFSET(15);
1787 __raw_writeq(reg, sc->sbm_rxfilter); 1805 __raw_writeq(reg, sc->sbm_rxfilter);
1788 1806
1789 /* read system identification to determine revision */ 1807 /* BCM1250 pass1 didn't have hardware checksum. Everything
1790 if (periph_rev >= 2) { 1808 later does. */
1791 sc->rx_hw_checksum = ENABLE; 1809 if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2) {
1792 } else {
1793 sc->rx_hw_checksum = DISABLE; 1810 sc->rx_hw_checksum = DISABLE;
1811 } else {
1812 sc->rx_hw_checksum = ENABLE;
1794 } 1813 }
1795} 1814}
1796 1815
@@ -2220,7 +2239,7 @@ static void sbmac_setmulti(struct sbmac_softc *sc)
2220 2239
2221 2240
2222 2241
2223#if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) 2242#if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR)
2224/********************************************************************** 2243/**********************************************************************
2225 * SBMAC_PARSE_XDIGIT(str) 2244 * SBMAC_PARSE_XDIGIT(str)
2226 * 2245 *
@@ -2792,7 +2811,7 @@ static int sbmac_close(struct net_device *dev)
2792 2811
2793 2812
2794 2813
2795#if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) 2814#if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR)
2796static void 2815static void
2797sbmac_setup_hwaddr(int chan,char *addr) 2816sbmac_setup_hwaddr(int chan,char *addr)
2798{ 2817{
@@ -2818,25 +2837,7 @@ sbmac_init_module(void)
2818 unsigned long port; 2837 unsigned long port;
2819 int chip_max_units; 2838 int chip_max_units;
2820 2839
2821 /* 2840 /* Set the number of available units based on the SOC type. */
2822 * For bringup when not using the firmware, we can pre-fill
2823 * the MAC addresses using the environment variables
2824 * specified in this file (or maybe from the config file?)
2825 */
2826#ifdef SBMAC_ETH0_HWADDR
2827 sbmac_setup_hwaddr(0,SBMAC_ETH0_HWADDR);
2828#endif
2829#ifdef SBMAC_ETH1_HWADDR
2830 sbmac_setup_hwaddr(1,SBMAC_ETH1_HWADDR);
2831#endif
2832#ifdef SBMAC_ETH2_HWADDR
2833 sbmac_setup_hwaddr(2,SBMAC_ETH2_HWADDR);
2834#endif
2835
2836 /*
2837 * Walk through the Ethernet controllers and find
2838 * those who have their MAC addresses set.
2839 */
2840 switch (soc_type) { 2841 switch (soc_type) {
2841 case K_SYS_SOC_TYPE_BCM1250: 2842 case K_SYS_SOC_TYPE_BCM1250:
2842 case K_SYS_SOC_TYPE_BCM1250_ALT: 2843 case K_SYS_SOC_TYPE_BCM1250_ALT:
@@ -2848,6 +2849,10 @@ sbmac_init_module(void)
2848 case K_SYS_SOC_TYPE_BCM1250_ALT2: /* Hybrid */ 2849 case K_SYS_SOC_TYPE_BCM1250_ALT2: /* Hybrid */
2849 chip_max_units = 2; 2850 chip_max_units = 2;
2850 break; 2851 break;
2852 case K_SYS_SOC_TYPE_BCM1x55:
2853 case K_SYS_SOC_TYPE_BCM1x80:
2854 chip_max_units = 4;
2855 break;
2851 default: 2856 default:
2852 chip_max_units = 0; 2857 chip_max_units = 0;
2853 break; 2858 break;
@@ -2855,6 +2860,32 @@ sbmac_init_module(void)
2855 if (chip_max_units > MAX_UNITS) 2860 if (chip_max_units > MAX_UNITS)
2856 chip_max_units = MAX_UNITS; 2861 chip_max_units = MAX_UNITS;
2857 2862
2863 /*
2864 * For bringup when not using the firmware, we can pre-fill
2865 * the MAC addresses using the environment variables
2866 * specified in this file (or maybe from the config file?)
2867 */
2868#ifdef SBMAC_ETH0_HWADDR
2869 if (chip_max_units > 0)
2870 sbmac_setup_hwaddr(0,SBMAC_ETH0_HWADDR);
2871#endif
2872#ifdef SBMAC_ETH1_HWADDR
2873 if (chip_max_units > 1)
2874 sbmac_setup_hwaddr(1,SBMAC_ETH1_HWADDR);
2875#endif
2876#ifdef SBMAC_ETH2_HWADDR
2877 if (chip_max_units > 2)
2878 sbmac_setup_hwaddr(2,SBMAC_ETH2_HWADDR);
2879#endif
2880#ifdef SBMAC_ETH3_HWADDR
2881 if (chip_max_units > 3)
2882 sbmac_setup_hwaddr(3,SBMAC_ETH3_HWADDR);
2883#endif
2884
2885 /*
2886 * Walk through the Ethernet controllers and find
2887 * those who have their MAC addresses set.
2888 */
2858 for (idx = 0; idx < chip_max_units; idx++) { 2889 for (idx = 0; idx < chip_max_units; idx++) {
2859 2890
2860 /* 2891 /*
@@ -2886,7 +2917,7 @@ sbmac_init_module(void)
2886 2917
2887 printk(KERN_DEBUG "sbmac: configuring MAC at %lx\n", port); 2918 printk(KERN_DEBUG "sbmac: configuring MAC at %lx\n", port);
2888 2919
2889 dev->irq = K_INT_MAC_0 + idx; 2920 dev->irq = UNIT_INT(idx);
2890 dev->base_addr = port; 2921 dev->base_addr = port;
2891 dev->mem_end = 0; 2922 dev->mem_end = 0;
2892 if (sbmac_init(dev, idx)) { 2923 if (sbmac_init(dev, idx)) {
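The sb1250-mac changes above introduce UNIT_INT(n) because the two supported SOC families number their MAC interrupts differently: BCM1480-class parts space them two apart, SB1250/BCM112x parts one apart. A hedged runtime sketch of that mapping follows; the base interrupt numbers are made up for illustration (they are not the real K_INT_MAC_0 / K_BCM1480_INT_MAC_0 values from the SiByte headers), and the driver itself resolves the choice at compile time with #if rather than at run time.

#include <stdio.h>

/* Illustrative base IRQs only, not the real SiByte register values. */
#define SB1250_MAC0_IRQ   20
#define BCM1480_MAC0_IRQ  30

static int unit_irq(int is_bcm1480, int unit)
{
	/* Same shape as UNIT_INT(): BCM1480-class parts space the MAC
	 * interrupts two apart, SB1250/BCM112x parts one apart. */
	return is_bcm1480 ? BCM1480_MAC0_IRQ + unit * 2
			  : SB1250_MAC0_IRQ + unit;
}

int main(void)
{
	for (int unit = 0; unit < 4; unit++)
		printf("unit %d: sb1250 irq %d, bcm1480 irq %d\n",
		       unit, unit_irq(0, unit), unit_irq(1, unit));
	return 0;
}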
diff --git a/drivers/net/seeq8005.c b/drivers/net/seeq8005.c
index 79dca398f3ac..bcef03feb2fc 100644
--- a/drivers/net/seeq8005.c
+++ b/drivers/net/seeq8005.c
@@ -46,6 +46,7 @@ static const char version[] =
46#include <linux/etherdevice.h> 46#include <linux/etherdevice.h>
47#include <linux/skbuff.h> 47#include <linux/skbuff.h>
48#include <linux/bitops.h> 48#include <linux/bitops.h>
49#include <linux/jiffies.h>
49 50
50#include <asm/system.h> 51#include <asm/system.h>
51#include <asm/io.h> 52#include <asm/io.h>
@@ -699,7 +700,7 @@ static void hardware_send_packet(struct net_device * dev, char *buf, int length)
699 int ioaddr = dev->base_addr; 700 int ioaddr = dev->base_addr;
700 int status = inw(SEEQ_STATUS); 701 int status = inw(SEEQ_STATUS);
701 int transmit_ptr = 0; 702 int transmit_ptr = 0;
702 int tmp; 703 unsigned long tmp;
703 704
704 if (net_debug>4) { 705 if (net_debug>4) {
705 printk("%s: send 0x%04x\n",dev->name,length); 706 printk("%s: send 0x%04x\n",dev->name,length);
@@ -724,7 +725,7 @@ static void hardware_send_packet(struct net_device * dev, char *buf, int length)
724 725
725 /* drain FIFO */ 726 /* drain FIFO */
726 tmp = jiffies; 727 tmp = jiffies;
727 while ( (((status=inw(SEEQ_STATUS)) & SEEQSTAT_FIFO_EMPTY) == 0) && (jiffies - tmp < HZ)) 728 while ( (((status=inw(SEEQ_STATUS)) & SEEQSTAT_FIFO_EMPTY) == 0) && time_before(jiffies, tmp + HZ))
728 mb(); 729 mb();
729 730
730 /* doit ! */ 731 /* doit ! */
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index a4614df38a90..f95a5b0223fb 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -3,6 +3,9 @@
3 * 3 *
4 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) 4 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
5 */ 5 */
6
7#undef DEBUG
8
6#include <linux/kernel.h> 9#include <linux/kernel.h>
7#include <linux/module.h> 10#include <linux/module.h>
8#include <linux/errno.h> 11#include <linux/errno.h>
@@ -59,8 +62,6 @@ static char *sgiseeqstr = "SGI Seeq8003";
59 sp->tx_old + (SEEQ_TX_BUFFERS - 1) - sp->tx_new : \ 62 sp->tx_old + (SEEQ_TX_BUFFERS - 1) - sp->tx_new : \
60 sp->tx_old - sp->tx_new - 1) 63 sp->tx_old - sp->tx_new - 1)
61 64
62#define DEBUG
63
64struct sgiseeq_rx_desc { 65struct sgiseeq_rx_desc {
65 volatile struct hpc_dma_desc rdma; 66 volatile struct hpc_dma_desc rdma;
66 volatile signed int buf_vaddr; 67 volatile signed int buf_vaddr;
@@ -209,7 +210,7 @@ static int seeq_init_ring(struct net_device *dev)
209static struct sgiseeq_private *gpriv; 210static struct sgiseeq_private *gpriv;
210static struct net_device *gdev; 211static struct net_device *gdev;
211 212
212void sgiseeq_dump_rings(void) 213static void sgiseeq_dump_rings(void)
213{ 214{
214 static int once; 215 static int once;
215 struct sgiseeq_rx_desc *r = gpriv->rx_desc; 216 struct sgiseeq_rx_desc *r = gpriv->rx_desc;
@@ -311,9 +312,9 @@ static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp
311 struct sgiseeq_regs *sregs) 312 struct sgiseeq_regs *sregs)
312{ 313{
313 struct sgiseeq_rx_desc *rd; 314 struct sgiseeq_rx_desc *rd;
314 struct sk_buff *skb = 0; 315 struct sk_buff *skb = NULL;
315 unsigned char pkt_status; 316 unsigned char pkt_status;
316 unsigned char *pkt_pointer = 0; 317 unsigned char *pkt_pointer = NULL;
317 int len = 0; 318 int len = 0;
318 unsigned int orig_end = PREV_RX(sp->rx_new); 319 unsigned int orig_end = PREV_RX(sp->rx_new);
319 320
@@ -515,12 +516,6 @@ static inline int sgiseeq_reset(struct net_device *dev)
515 return 0; 516 return 0;
516} 517}
517 518
518void sgiseeq_my_reset(void)
519{
520 printk("RESET!\n");
521 sgiseeq_reset(gdev);
522}
523
524static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev) 519static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
525{ 520{
526 struct sgiseeq_private *sp = netdev_priv(dev); 521 struct sgiseeq_private *sp = netdev_priv(dev);
diff --git a/drivers/net/shaper.c b/drivers/net/shaper.c
index 221354eea21f..88e212043a43 100644
--- a/drivers/net/shaper.c
+++ b/drivers/net/shaper.c
@@ -83,6 +83,7 @@
83#include <linux/if_arp.h> 83#include <linux/if_arp.h>
84#include <linux/init.h> 84#include <linux/init.h>
85#include <linux/if_shaper.h> 85#include <linux/if_shaper.h>
86#include <linux/jiffies.h>
86 87
87#include <net/dst.h> 88#include <net/dst.h>
88#include <net/arp.h> 89#include <net/arp.h>
@@ -168,7 +169,7 @@ static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev)
168 /* 169 /*
169 * Queue over time. Spill packet. 170 * Queue over time. Spill packet.
170 */ 171 */
171 if(SHAPERCB(skb)->shapeclock-jiffies > SHAPER_LATENCY) { 172 if(time_after(SHAPERCB(skb)->shapeclock,jiffies + SHAPER_LATENCY)) {
172 dev_kfree_skb(skb); 173 dev_kfree_skb(skb);
173 shaper->stats.tx_dropped++; 174 shaper->stats.tx_dropped++;
174 } else 175 } else
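The seeq8005 and shaper changes above both replace raw jiffies subtractions with the kernel's time_before()/time_after() helpers, which cast the difference to a signed type so the comparison stays correct across counter wrap and when the reference value has already passed. A minimal user-space sketch of the failure mode and the fix, with my_time_after standing in for the kernel macro:

#include <stdio.h>

/* Same definition shape as the kernel's time_after(a, b):
 * true if a is after b, judged by a signed difference. */
#define my_time_after(a, b)  ((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long jiffies = 1000;
	unsigned long shapeclock = 990;	/* deadline that has already passed */
	unsigned long latency = 50;

	/* Old test: unsigned underflow turns -10 into a huge value, so a
	 * packet that is not actually late looks hopelessly late. */
	printf("naive:     %s\n",
	       (shapeclock - jiffies > latency) ? "drop" : "queue");

	/* Replacement in the style of the shaper.c hunk above. */
	printf("wrap-safe: %s\n",
	       my_time_after(shapeclock, jiffies + latency) ? "drop" : "queue");
	return 0;
}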
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index ed4bc91638d2..31dd3f036fa8 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -366,7 +366,7 @@ static const u32 sis190_intr_mask =
366 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast). 366 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
367 * The chips use a 64 element hash table based on the Ethernet CRC. 367 * The chips use a 64 element hash table based on the Ethernet CRC.
368 */ 368 */
369static int multicast_filter_limit = 32; 369static const int multicast_filter_limit = 32;
370 370
371static void __mdio_cmd(void __iomem *ioaddr, u32 ctl) 371static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
372{ 372{
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 7a952fe60be2..a1cb07cdb60f 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -100,7 +100,7 @@ enum {
100 SIS_900 = 0, 100 SIS_900 = 0,
101 SIS_7016 101 SIS_7016
102}; 102};
103static char * card_names[] = { 103static const char * card_names[] = {
104 "SiS 900 PCI Fast Ethernet", 104 "SiS 900 PCI Fast Ethernet",
105 "SiS 7016 PCI Fast Ethernet" 105 "SiS 7016 PCI Fast Ethernet"
106}; 106};
@@ -115,7 +115,7 @@ MODULE_DEVICE_TABLE (pci, sis900_pci_tbl);
115 115
116static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex); 116static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex);
117 117
118static struct mii_chip_info { 118static const struct mii_chip_info {
119 const char * name; 119 const char * name;
120 u16 phy_id0; 120 u16 phy_id0;
121 u16 phy_id1; 121 u16 phy_id1;
@@ -400,7 +400,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
400 void *ring_space; 400 void *ring_space;
401 long ioaddr; 401 long ioaddr;
402 int i, ret; 402 int i, ret;
403 char *card_name = card_names[pci_id->driver_data]; 403 const char *card_name = card_names[pci_id->driver_data];
404 const char *dev_name = pci_name(pci_dev); 404 const char *dev_name = pci_name(pci_dev);
405 405
406/* when built into the kernel, we only print version if device is found */ 406/* when built into the kernel, we only print version if device is found */
@@ -1275,7 +1275,7 @@ static void sis900_timer(unsigned long data)
1275 struct net_device *net_dev = (struct net_device *)data; 1275 struct net_device *net_dev = (struct net_device *)data;
1276 struct sis900_private *sis_priv = net_dev->priv; 1276 struct sis900_private *sis_priv = net_dev->priv;
1277 struct mii_phy *mii_phy = sis_priv->mii; 1277 struct mii_phy *mii_phy = sis_priv->mii;
1278 static int next_tick = 5*HZ; 1278 static const int next_tick = 5*HZ;
1279 u16 status; 1279 u16 status;
1280 1280
1281 if (!sis_priv->autong_complete){ 1281 if (!sis_priv->autong_complete){
diff --git a/drivers/net/sk98lin/h/skaddr.h b/drivers/net/sk98lin/h/skaddr.h
index 3a2ea4a4b539..423ad063d09b 100644
--- a/drivers/net/sk98lin/h/skaddr.h
+++ b/drivers/net/sk98lin/h/skaddr.h
@@ -236,18 +236,6 @@ extern int SkAddrMcClear(
236 SK_U32 PortNumber, 236 SK_U32 PortNumber,
237 int Flags); 237 int Flags);
238 238
239extern int SkAddrXmacMcClear(
240 SK_AC *pAC,
241 SK_IOC IoC,
242 SK_U32 PortNumber,
243 int Flags);
244
245extern int SkAddrGmacMcClear(
246 SK_AC *pAC,
247 SK_IOC IoC,
248 SK_U32 PortNumber,
249 int Flags);
250
251extern int SkAddrMcAdd( 239extern int SkAddrMcAdd(
252 SK_AC *pAC, 240 SK_AC *pAC,
253 SK_IOC IoC, 241 SK_IOC IoC,
@@ -255,35 +243,11 @@ extern int SkAddrMcAdd(
255 SK_MAC_ADDR *pMc, 243 SK_MAC_ADDR *pMc,
256 int Flags); 244 int Flags);
257 245
258extern int SkAddrXmacMcAdd(
259 SK_AC *pAC,
260 SK_IOC IoC,
261 SK_U32 PortNumber,
262 SK_MAC_ADDR *pMc,
263 int Flags);
264
265extern int SkAddrGmacMcAdd(
266 SK_AC *pAC,
267 SK_IOC IoC,
268 SK_U32 PortNumber,
269 SK_MAC_ADDR *pMc,
270 int Flags);
271
272extern int SkAddrMcUpdate( 246extern int SkAddrMcUpdate(
273 SK_AC *pAC, 247 SK_AC *pAC,
274 SK_IOC IoC, 248 SK_IOC IoC,
275 SK_U32 PortNumber); 249 SK_U32 PortNumber);
276 250
277extern int SkAddrXmacMcUpdate(
278 SK_AC *pAC,
279 SK_IOC IoC,
280 SK_U32 PortNumber);
281
282extern int SkAddrGmacMcUpdate(
283 SK_AC *pAC,
284 SK_IOC IoC,
285 SK_U32 PortNumber);
286
287extern int SkAddrOverride( 251extern int SkAddrOverride(
288 SK_AC *pAC, 252 SK_AC *pAC,
289 SK_IOC IoC, 253 SK_IOC IoC,
@@ -297,18 +261,6 @@ extern int SkAddrPromiscuousChange(
297 SK_U32 PortNumber, 261 SK_U32 PortNumber,
298 int NewPromMode); 262 int NewPromMode);
299 263
300extern int SkAddrXmacPromiscuousChange(
301 SK_AC *pAC,
302 SK_IOC IoC,
303 SK_U32 PortNumber,
304 int NewPromMode);
305
306extern int SkAddrGmacPromiscuousChange(
307 SK_AC *pAC,
308 SK_IOC IoC,
309 SK_U32 PortNumber,
310 int NewPromMode);
311
312#ifndef SK_SLIM 264#ifndef SK_SLIM
313extern int SkAddrSwap( 265extern int SkAddrSwap(
314 SK_AC *pAC, 266 SK_AC *pAC,
diff --git a/drivers/net/sk98lin/h/skcsum.h b/drivers/net/sk98lin/h/skcsum.h
index 2b94adb93331..6e256bd9a28c 100644
--- a/drivers/net/sk98lin/h/skcsum.h
+++ b/drivers/net/sk98lin/h/skcsum.h
@@ -203,12 +203,6 @@ extern SKCS_STATUS SkCsGetReceiveInfo(
203 unsigned Checksum2, 203 unsigned Checksum2,
204 int NetNumber); 204 int NetNumber);
205 205
206extern void SkCsGetSendInfo(
207 SK_AC *pAc,
208 void *pIpHeader,
209 SKCS_PACKET_INFO *pPacketInfo,
210 int NetNumber);
211
212extern void SkCsSetReceiveFlags( 206extern void SkCsSetReceiveFlags(
213 SK_AC *pAc, 207 SK_AC *pAc,
214 unsigned ReceiveFlags, 208 unsigned ReceiveFlags,
diff --git a/drivers/net/sk98lin/h/skgeinit.h b/drivers/net/sk98lin/h/skgeinit.h
index 184f47c5a60f..143e635ec24d 100644
--- a/drivers/net/sk98lin/h/skgeinit.h
+++ b/drivers/net/sk98lin/h/skgeinit.h
@@ -464,12 +464,6 @@ typedef struct s_GeInit {
464/* 464/*
465 * public functions in skgeinit.c 465 * public functions in skgeinit.c
466 */ 466 */
467extern void SkGePollRxD(
468 SK_AC *pAC,
469 SK_IOC IoC,
470 int Port,
471 SK_BOOL PollRxD);
472
473extern void SkGePollTxD( 467extern void SkGePollTxD(
474 SK_AC *pAC, 468 SK_AC *pAC,
475 SK_IOC IoC, 469 SK_IOC IoC,
@@ -522,10 +516,6 @@ extern void SkGeXmitLED(
522 int Led, 516 int Led,
523 int Mode); 517 int Mode);
524 518
525extern void SkGeInitRamIface(
526 SK_AC *pAC,
527 SK_IOC IoC);
528
529extern int SkGeInitAssignRamToQueues( 519extern int SkGeInitAssignRamToQueues(
530 SK_AC *pAC, 520 SK_AC *pAC,
531 int ActivePort, 521 int ActivePort,
@@ -549,11 +539,6 @@ extern void SkMacHardRst(
549 SK_IOC IoC, 539 SK_IOC IoC,
550 int Port); 540 int Port);
551 541
552extern void SkMacClearRst(
553 SK_AC *pAC,
554 SK_IOC IoC,
555 int Port);
556
557extern void SkXmInitMac( 542extern void SkXmInitMac(
558 SK_AC *pAC, 543 SK_AC *pAC,
559 SK_IOC IoC, 544 SK_IOC IoC,
@@ -580,11 +565,6 @@ extern void SkMacFlushTxFifo(
580 SK_IOC IoC, 565 SK_IOC IoC,
581 int Port); 566 int Port);
582 567
583extern void SkMacFlushRxFifo(
584 SK_AC *pAC,
585 SK_IOC IoC,
586 int Port);
587
588extern void SkMacIrq( 568extern void SkMacIrq(
589 SK_AC *pAC, 569 SK_AC *pAC,
590 SK_IOC IoC, 570 SK_IOC IoC,
@@ -601,12 +581,6 @@ extern void SkMacAutoNegLipaPhy(
601 int Port, 581 int Port,
602 SK_U16 IStatus); 582 SK_U16 IStatus);
603 583
604extern void SkMacSetRxTxEn(
605 SK_AC *pAC,
606 SK_IOC IoC,
607 int Port,
608 int Para);
609
610extern int SkMacRxTxEnable( 584extern int SkMacRxTxEnable(
611 SK_AC *pAC, 585 SK_AC *pAC,
612 SK_IOC IoC, 586 SK_IOC IoC,
@@ -659,16 +633,6 @@ extern void SkXmClrExactAddr(
659 int StartNum, 633 int StartNum,
660 int StopNum); 634 int StopNum);
661 635
662extern void SkXmInitDupMd(
663 SK_AC *pAC,
664 SK_IOC IoC,
665 int Port);
666
667extern void SkXmInitPauseMd(
668 SK_AC *pAC,
669 SK_IOC IoC,
670 int Port);
671
672extern void SkXmAutoNegLipaXmac( 636extern void SkXmAutoNegLipaXmac(
673 SK_AC *pAC, 637 SK_AC *pAC,
674 SK_IOC IoC, 638 SK_IOC IoC,
@@ -729,17 +693,6 @@ extern int SkGmCableDiagStatus(
729 int Port, 693 int Port,
730 SK_BOOL StartTest); 694 SK_BOOL StartTest);
731 695
732extern int SkGmEnterLowPowerMode(
733 SK_AC *pAC,
734 SK_IOC IoC,
735 int Port,
736 SK_U8 Mode);
737
738extern int SkGmLeaveLowPowerMode(
739 SK_AC *pAC,
740 SK_IOC IoC,
741 int Port);
742
743#ifdef SK_DIAG 696#ifdef SK_DIAG
744extern void SkGePhyRead( 697extern void SkGePhyRead(
745 SK_AC *pAC, 698 SK_AC *pAC,
@@ -782,7 +735,6 @@ extern void SkXmSendCont(
782/* 735/*
783 * public functions in skgeinit.c 736 * public functions in skgeinit.c
784 */ 737 */
785extern void SkGePollRxD();
786extern void SkGePollTxD(); 738extern void SkGePollTxD();
787extern void SkGeYellowLED(); 739extern void SkGeYellowLED();
788extern int SkGeCfgSync(); 740extern int SkGeCfgSync();
@@ -792,7 +744,6 @@ extern int SkGeInit();
792extern void SkGeDeInit(); 744extern void SkGeDeInit();
793extern int SkGeInitPort(); 745extern int SkGeInitPort();
794extern void SkGeXmitLED(); 746extern void SkGeXmitLED();
795extern void SkGeInitRamIface();
796extern int SkGeInitAssignRamToQueues(); 747extern int SkGeInitAssignRamToQueues();
797 748
798/* 749/*
@@ -801,18 +752,15 @@ extern int SkGeInitAssignRamToQueues();
801extern void SkMacRxTxDisable(); 752extern void SkMacRxTxDisable();
802extern void SkMacSoftRst(); 753extern void SkMacSoftRst();
803extern void SkMacHardRst(); 754extern void SkMacHardRst();
804extern void SkMacClearRst();
805extern void SkMacInitPhy(); 755extern void SkMacInitPhy();
806extern int SkMacRxTxEnable(); 756extern int SkMacRxTxEnable();
807extern void SkMacPromiscMode(); 757extern void SkMacPromiscMode();
808extern void SkMacHashing(); 758extern void SkMacHashing();
809extern void SkMacIrqDisable(); 759extern void SkMacIrqDisable();
810extern void SkMacFlushTxFifo(); 760extern void SkMacFlushTxFifo();
811extern void SkMacFlushRxFifo();
812extern void SkMacIrq(); 761extern void SkMacIrq();
813extern int SkMacAutoNegDone(); 762extern int SkMacAutoNegDone();
814extern void SkMacAutoNegLipaPhy(); 763extern void SkMacAutoNegLipaPhy();
815extern void SkMacSetRxTxEn();
816extern void SkXmInitMac(); 764extern void SkXmInitMac();
817extern void SkXmPhyRead(); 765extern void SkXmPhyRead();
818extern void SkXmPhyWrite(); 766extern void SkXmPhyWrite();
@@ -820,8 +768,6 @@ extern void SkGmInitMac();
820extern void SkGmPhyRead(); 768extern void SkGmPhyRead();
821extern void SkGmPhyWrite(); 769extern void SkGmPhyWrite();
822extern void SkXmClrExactAddr(); 770extern void SkXmClrExactAddr();
823extern void SkXmInitDupMd();
824extern void SkXmInitPauseMd();
825extern void SkXmAutoNegLipaXmac(); 771extern void SkXmAutoNegLipaXmac();
826extern int SkXmUpdateStats(); 772extern int SkXmUpdateStats();
827extern int SkGmUpdateStats(); 773extern int SkGmUpdateStats();
@@ -832,8 +778,6 @@ extern int SkGmResetCounter();
832extern int SkXmOverflowStatus(); 778extern int SkXmOverflowStatus();
833extern int SkGmOverflowStatus(); 779extern int SkGmOverflowStatus();
834extern int SkGmCableDiagStatus(); 780extern int SkGmCableDiagStatus();
835extern int SkGmEnterLowPowerMode();
836extern int SkGmLeaveLowPowerMode();
837 781
838#ifdef SK_DIAG 782#ifdef SK_DIAG
839extern void SkGePhyRead(); 783extern void SkGePhyRead();
diff --git a/drivers/net/sk98lin/h/skgepnmi.h b/drivers/net/sk98lin/h/skgepnmi.h
index 3b2773e6f822..1ed214ccb253 100644
--- a/drivers/net/sk98lin/h/skgepnmi.h
+++ b/drivers/net/sk98lin/h/skgepnmi.h
@@ -946,10 +946,6 @@ typedef struct s_PnmiData {
946 * Function prototypes 946 * Function prototypes
947 */ 947 */
948extern int SkPnmiInit(SK_AC *pAC, SK_IOC IoC, int Level); 948extern int SkPnmiInit(SK_AC *pAC, SK_IOC IoC, int Level);
949extern int SkPnmiGetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void* pBuf,
950 unsigned int* pLen, SK_U32 Instance, SK_U32 NetIndex);
951extern int SkPnmiPreSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id,
952 void* pBuf, unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex);
953extern int SkPnmiSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void* pBuf, 949extern int SkPnmiSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void* pBuf,
954 unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex); 950 unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex);
955extern int SkPnmiGetStruct(SK_AC *pAC, SK_IOC IoC, void* pBuf, 951extern int SkPnmiGetStruct(SK_AC *pAC, SK_IOC IoC, void* pBuf,
diff --git a/drivers/net/sk98lin/h/skgesirq.h b/drivers/net/sk98lin/h/skgesirq.h
index b486bd9b6628..3eec6274e413 100644
--- a/drivers/net/sk98lin/h/skgesirq.h
+++ b/drivers/net/sk98lin/h/skgesirq.h
@@ -105,7 +105,6 @@
105 105
106extern void SkGeSirqIsr(SK_AC *pAC, SK_IOC IoC, SK_U32 Istatus); 106extern void SkGeSirqIsr(SK_AC *pAC, SK_IOC IoC, SK_U32 Istatus);
107extern int SkGeSirqEvent(SK_AC *pAC, SK_IOC IoC, SK_U32 Event, SK_EVPARA Para); 107extern int SkGeSirqEvent(SK_AC *pAC, SK_IOC IoC, SK_U32 Event, SK_EVPARA Para);
108extern void SkHWLinkUp(SK_AC *pAC, SK_IOC IoC, int Port);
109extern void SkHWLinkDown(SK_AC *pAC, SK_IOC IoC, int Port); 108extern void SkHWLinkDown(SK_AC *pAC, SK_IOC IoC, int Port);
110 109
111#endif /* _INC_SKGESIRQ_H_ */ 110#endif /* _INC_SKGESIRQ_H_ */
diff --git a/drivers/net/sk98lin/h/ski2c.h b/drivers/net/sk98lin/h/ski2c.h
index 598bb42ccc3d..6a63f4a15de6 100644
--- a/drivers/net/sk98lin/h/ski2c.h
+++ b/drivers/net/sk98lin/h/ski2c.h
@@ -162,9 +162,6 @@ typedef struct s_I2c {
162} SK_I2C; 162} SK_I2C;
163 163
164extern int SkI2cInit(SK_AC *pAC, SK_IOC IoC, int Level); 164extern int SkI2cInit(SK_AC *pAC, SK_IOC IoC, int Level);
165extern int SkI2cWrite(SK_AC *pAC, SK_IOC IoC, SK_U32 Data, int Dev, int Size,
166 int Reg, int Burst);
167extern int SkI2cReadSensor(SK_AC *pAC, SK_IOC IoC, SK_SENSOR *pSen);
168#ifdef SK_DIAG 165#ifdef SK_DIAG
169extern SK_U32 SkI2cRead(SK_AC *pAC, SK_IOC IoC, int Dev, int Size, int Reg, 166extern SK_U32 SkI2cRead(SK_AC *pAC, SK_IOC IoC, int Dev, int Size, int Reg,
170 int Burst); 167 int Burst);
diff --git a/drivers/net/sk98lin/h/skvpd.h b/drivers/net/sk98lin/h/skvpd.h
index daa9a8d154fc..fdd9e48e8040 100644
--- a/drivers/net/sk98lin/h/skvpd.h
+++ b/drivers/net/sk98lin/h/skvpd.h
@@ -183,14 +183,6 @@ extern SK_U32 VpdReadDWord(
183 int addr); 183 int addr);
184#endif /* SKDIAG */ 184#endif /* SKDIAG */
185 185
186extern int VpdSetupPara(
187 SK_AC *pAC,
188 const char *key,
189 const char *buf,
190 int len,
191 int type,
192 int op);
193
194extern SK_VPD_STATUS *VpdStat( 186extern SK_VPD_STATUS *VpdStat(
195 SK_AC *pAC, 187 SK_AC *pAC,
196 SK_IOC IoC); 188 SK_IOC IoC);
@@ -227,11 +219,6 @@ extern int VpdUpdate(
227 SK_AC *pAC, 219 SK_AC *pAC,
228 SK_IOC IoC); 220 SK_IOC IoC);
229 221
230extern void VpdErrLog(
231 SK_AC *pAC,
232 SK_IOC IoC,
233 char *msg);
234
235#ifdef SKDIAG 222#ifdef SKDIAG
236extern int VpdReadBlock( 223extern int VpdReadBlock(
237 SK_AC *pAC, 224 SK_AC *pAC,
@@ -249,7 +236,6 @@ extern int VpdWriteBlock(
249#endif /* SKDIAG */ 236#endif /* SKDIAG */
250#else /* SK_KR_PROTO */ 237#else /* SK_KR_PROTO */
251extern SK_U32 VpdReadDWord(); 238extern SK_U32 VpdReadDWord();
252extern int VpdSetupPara();
253extern SK_VPD_STATUS *VpdStat(); 239extern SK_VPD_STATUS *VpdStat();
254extern int VpdKeys(); 240extern int VpdKeys();
255extern int VpdRead(); 241extern int VpdRead();
@@ -257,7 +243,6 @@ extern SK_BOOL VpdMayWrite();
257extern int VpdWrite(); 243extern int VpdWrite();
258extern int VpdDelete(); 244extern int VpdDelete();
259extern int VpdUpdate(); 245extern int VpdUpdate();
260extern void VpdErrLog();
261#endif /* SK_KR_PROTO */ 246#endif /* SK_KR_PROTO */
262 247
263#endif /* __INC_SKVPD_H_ */ 248#endif /* __INC_SKVPD_H_ */
diff --git a/drivers/net/sk98lin/skaddr.c b/drivers/net/sk98lin/skaddr.c
index a7e25edc7fc4..6e6c56aa6d6f 100644
--- a/drivers/net/sk98lin/skaddr.c
+++ b/drivers/net/sk98lin/skaddr.c
@@ -87,6 +87,21 @@ static const SK_U16 OnesHash[4] = {0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF};
87static int Next0[SK_MAX_MACS] = {0}; 87static int Next0[SK_MAX_MACS] = {0};
88#endif /* DEBUG */ 88#endif /* DEBUG */
89 89
90static int SkAddrGmacMcAdd(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber,
91 SK_MAC_ADDR *pMc, int Flags);
92static int SkAddrGmacMcClear(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber,
93 int Flags);
94static int SkAddrGmacMcUpdate(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber);
95static int SkAddrGmacPromiscuousChange(SK_AC *pAC, SK_IOC IoC,
96 SK_U32 PortNumber, int NewPromMode);
97static int SkAddrXmacMcAdd(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber,
98 SK_MAC_ADDR *pMc, int Flags);
99static int SkAddrXmacMcClear(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber,
100 int Flags);
101static int SkAddrXmacMcUpdate(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber);
102static int SkAddrXmacPromiscuousChange(SK_AC *pAC, SK_IOC IoC,
103 SK_U32 PortNumber, int NewPromMode);
104
90/* functions ******************************************************************/ 105/* functions ******************************************************************/
91 106
92/****************************************************************************** 107/******************************************************************************
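The added block above only introduces file-scope prototypes so the Xmac/Gmac helpers further down can be declared static. A minimal, self-contained sketch of that pattern (the names here are made up, not taken from the driver):

	/* sketch.c - illustrative only */
	#include <stdio.h>

	/* forward declaration lets a later-defined helper stay file-local */
	static int helper_update(int port);

	int public_entry(int port)
	{
		/* the exported entry point calls the internal helper */
		return helper_update(port);
	}

	static int helper_update(int port)
	{
		printf("updating port %d\n", port);
		return 0;
	}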
@@ -372,7 +387,7 @@ int Flags) /* permanent/non-perm, sw-only */
372 * SK_ADDR_SUCCESS 387 * SK_ADDR_SUCCESS
373 * SK_ADDR_ILLEGAL_PORT 388 * SK_ADDR_ILLEGAL_PORT
374 */ 389 */
375int SkAddrXmacMcClear( 390static int SkAddrXmacMcClear(
376SK_AC *pAC, /* adapter context */ 391SK_AC *pAC, /* adapter context */
377SK_IOC IoC, /* I/O context */ 392SK_IOC IoC, /* I/O context */
378SK_U32 PortNumber, /* Index of affected port */ 393SK_U32 PortNumber, /* Index of affected port */
@@ -429,7 +444,7 @@ int Flags) /* permanent/non-perm, sw-only */
429 * SK_ADDR_SUCCESS 444 * SK_ADDR_SUCCESS
430 * SK_ADDR_ILLEGAL_PORT 445 * SK_ADDR_ILLEGAL_PORT
431 */ 446 */
432int SkAddrGmacMcClear( 447static int SkAddrGmacMcClear(
433SK_AC *pAC, /* adapter context */ 448SK_AC *pAC, /* adapter context */
434SK_IOC IoC, /* I/O context */ 449SK_IOC IoC, /* I/O context */
435SK_U32 PortNumber, /* Index of affected port */ 450SK_U32 PortNumber, /* Index of affected port */
@@ -519,7 +534,7 @@ int Flags) /* permanent/non-perm, sw-only */
519 * Returns: 534 * Returns:
520 * Hash value of multicast address. 535 * Hash value of multicast address.
521 */ 536 */
522SK_U32 SkXmacMcHash( 537static SK_U32 SkXmacMcHash(
523unsigned char *pMc) /* Multicast address */ 538unsigned char *pMc) /* Multicast address */
524{ 539{
525 SK_U32 Idx; 540 SK_U32 Idx;
@@ -557,7 +572,7 @@ unsigned char *pMc) /* Multicast address */
557 * Returns: 572 * Returns:
558 * Hash value of multicast address. 573 * Hash value of multicast address.
559 */ 574 */
560SK_U32 SkGmacMcHash( 575static SK_U32 SkGmacMcHash(
561unsigned char *pMc) /* Multicast address */ 576unsigned char *pMc) /* Multicast address */
562{ 577{
563 SK_U32 Data; 578 SK_U32 Data;
@@ -672,7 +687,7 @@ int Flags) /* permanent/non-permanent */
672 * SK_MC_ILLEGAL_ADDRESS 687 * SK_MC_ILLEGAL_ADDRESS
673 * SK_MC_RLMT_OVERFLOW 688 * SK_MC_RLMT_OVERFLOW
674 */ 689 */
675int SkAddrXmacMcAdd( 690static int SkAddrXmacMcAdd(
676SK_AC *pAC, /* adapter context */ 691SK_AC *pAC, /* adapter context */
677SK_IOC IoC, /* I/O context */ 692SK_IOC IoC, /* I/O context */
678SK_U32 PortNumber, /* Port Number */ 693SK_U32 PortNumber, /* Port Number */
@@ -778,7 +793,7 @@ int Flags) /* permanent/non-permanent */
778 * SK_MC_FILTERING_INEXACT 793 * SK_MC_FILTERING_INEXACT
779 * SK_MC_ILLEGAL_ADDRESS 794 * SK_MC_ILLEGAL_ADDRESS
780 */ 795 */
781int SkAddrGmacMcAdd( 796static int SkAddrGmacMcAdd(
782SK_AC *pAC, /* adapter context */ 797SK_AC *pAC, /* adapter context */
783SK_IOC IoC, /* I/O context */ 798SK_IOC IoC, /* I/O context */
784SK_U32 PortNumber, /* Port Number */ 799SK_U32 PortNumber, /* Port Number */
@@ -937,7 +952,7 @@ SK_U32 PortNumber) /* Port Number */
937 * SK_MC_FILTERING_INEXACT 952 * SK_MC_FILTERING_INEXACT
938 * SK_ADDR_ILLEGAL_PORT 953 * SK_ADDR_ILLEGAL_PORT
939 */ 954 */
940int SkAddrXmacMcUpdate( 955static int SkAddrXmacMcUpdate(
941SK_AC *pAC, /* adapter context */ 956SK_AC *pAC, /* adapter context */
942SK_IOC IoC, /* I/O context */ 957SK_IOC IoC, /* I/O context */
943SK_U32 PortNumber) /* Port Number */ 958SK_U32 PortNumber) /* Port Number */
@@ -1082,7 +1097,7 @@ SK_U32 PortNumber) /* Port Number */
1082 * SK_MC_FILTERING_INEXACT 1097 * SK_MC_FILTERING_INEXACT
1083 * SK_ADDR_ILLEGAL_PORT 1098 * SK_ADDR_ILLEGAL_PORT
1084 */ 1099 */
1085int SkAddrGmacMcUpdate( 1100static int SkAddrGmacMcUpdate(
1086SK_AC *pAC, /* adapter context */ 1101SK_AC *pAC, /* adapter context */
1087SK_IOC IoC, /* I/O context */ 1102SK_IOC IoC, /* I/O context */
1088SK_U32 PortNumber) /* Port Number */ 1103SK_U32 PortNumber) /* Port Number */
@@ -1468,7 +1483,7 @@ int NewPromMode) /* new promiscuous mode */
1468 * SK_ADDR_SUCCESS 1483 * SK_ADDR_SUCCESS
1469 * SK_ADDR_ILLEGAL_PORT 1484 * SK_ADDR_ILLEGAL_PORT
1470 */ 1485 */
1471int SkAddrXmacPromiscuousChange( 1486static int SkAddrXmacPromiscuousChange(
1472SK_AC *pAC, /* adapter context */ 1487SK_AC *pAC, /* adapter context */
1473SK_IOC IoC, /* I/O context */ 1488SK_IOC IoC, /* I/O context */
1474SK_U32 PortNumber, /* port whose promiscuous mode changes */ 1489SK_U32 PortNumber, /* port whose promiscuous mode changes */
@@ -1585,7 +1600,7 @@ int NewPromMode) /* new promiscuous mode */
1585 * SK_ADDR_SUCCESS 1600 * SK_ADDR_SUCCESS
1586 * SK_ADDR_ILLEGAL_PORT 1601 * SK_ADDR_ILLEGAL_PORT
1587 */ 1602 */
1588int SkAddrGmacPromiscuousChange( 1603static int SkAddrGmacPromiscuousChange(
1589SK_AC *pAC, /* adapter context */ 1604SK_AC *pAC, /* adapter context */
1590SK_IOC IoC, /* I/O context */ 1605SK_IOC IoC, /* I/O context */
1591SK_U32 PortNumber, /* port whose promiscuous mode changes */ 1606SK_U32 PortNumber, /* port whose promiscuous mode changes */
diff --git a/drivers/net/sk98lin/skgeinit.c b/drivers/net/sk98lin/skgeinit.c
index 6cb49dd02251..67f1d6a5c15d 100644
--- a/drivers/net/sk98lin/skgeinit.c
+++ b/drivers/net/sk98lin/skgeinit.c
@@ -59,34 +59,6 @@ static struct s_Config OemConfig = {
59 59
60/****************************************************************************** 60/******************************************************************************
61 * 61 *
62 * SkGePollRxD() - Enable / Disable Descriptor Polling of RxD Ring
63 *
64 * Description:
65 * Enable or disable the descriptor polling of the receive descriptor
66 * ring (RxD) for port 'Port'.
67 * The new configuration is *not* saved over any SkGeStopPort() and
68 * SkGeInitPort() calls.
69 *
70 * Returns:
71 * nothing
72 */
73void SkGePollRxD(
74SK_AC *pAC, /* adapter context */
75SK_IOC IoC, /* IO context */
76int Port, /* Port Index (MAC_1 + n) */
77SK_BOOL PollRxD) /* SK_TRUE (enable pol.), SK_FALSE (disable pol.) */
78{
79 SK_GEPORT *pPrt;
80
81 pPrt = &pAC->GIni.GP[Port];
82
83 SK_OUT32(IoC, Q_ADDR(pPrt->PRxQOff, Q_CSR), (PollRxD) ?
84 CSR_ENA_POL : CSR_DIS_POL);
85} /* SkGePollRxD */
86
87
88/******************************************************************************
89 *
90 * SkGePollTxD() - Enable / Disable Descriptor Polling of TxD Rings 62 * SkGePollTxD() - Enable / Disable Descriptor Polling of TxD Rings
91 * 63 *
92 * Description: 64 * Description:
@@ -952,7 +924,7 @@ int Port) /* Port Index (MAC_1 + n) */
952 * Returns: 924 * Returns:
953 * nothing 925 * nothing
954 */ 926 */
955void SkGeInitRamIface( 927static void SkGeInitRamIface(
956SK_AC *pAC, /* adapter context */ 928SK_AC *pAC, /* adapter context */
957SK_IOC IoC) /* IO context */ 929SK_IOC IoC) /* IO context */
958{ 930{
@@ -1409,83 +1381,6 @@ SK_IOC IoC) /* IO context */
1409 1381
1410} /* SkGeInit0*/ 1382} /* SkGeInit0*/
1411 1383
1412#ifdef SK_PCI_RESET
1413
1414/******************************************************************************
1415 *
1416 * SkGePciReset() - Reset PCI interface
1417 *
1418 * Description:
1419 * o Read PCI configuration.
1420 * o Change power state to 3.
1421 * o Change power state to 0.
1422 * o Restore PCI configuration.
1423 *
1424 * Returns:
1425 * 0: Success.
1426 * 1: Power state could not be changed to 3.
1427 */
1428static int SkGePciReset(
1429SK_AC *pAC, /* adapter context */
1430SK_IOC IoC) /* IO context */
1431{
1432 int i;
1433 SK_U16 PmCtlSts;
1434 SK_U32 Bp1;
1435 SK_U32 Bp2;
1436 SK_U16 PciCmd;
1437 SK_U8 Cls;
1438 SK_U8 Lat;
1439 SK_U8 ConfigSpace[PCI_CFG_SIZE];
1440
1441 /*
1442 * Note: Switching to D3 state is like a software reset.
1443 * Switching from D3 to D0 is a hardware reset.
1444 * We have to save and restore the configuration space.
1445 */
1446 for (i = 0; i < PCI_CFG_SIZE; i++) {
1447 SkPciReadCfgDWord(pAC, i*4, &ConfigSpace[i]);
1448 }
1449
1450 /* We know the RAM Interface Arbiter is enabled. */
1451 SkPciWriteCfgWord(pAC, PCI_PM_CTL_STS, PCI_PM_STATE_D3);
1452 SkPciReadCfgWord(pAC, PCI_PM_CTL_STS, &PmCtlSts);
1453
1454 if ((PmCtlSts & PCI_PM_STATE_MSK) != PCI_PM_STATE_D3) {
1455 return(1);
1456 }
1457
1458 /* Return to D0 state. */
1459 SkPciWriteCfgWord(pAC, PCI_PM_CTL_STS, PCI_PM_STATE_D0);
1460
1461 /* Check for D0 state. */
1462 SkPciReadCfgWord(pAC, PCI_PM_CTL_STS, &PmCtlSts);
1463
1464 if ((PmCtlSts & PCI_PM_STATE_MSK) != PCI_PM_STATE_D0) {
1465 return(1);
1466 }
1467
1468 /* Check PCI Config Registers. */
1469 SkPciReadCfgWord(pAC, PCI_COMMAND, &PciCmd);
1470 SkPciReadCfgByte(pAC, PCI_CACHE_LSZ, &Cls);
1471 SkPciReadCfgDWord(pAC, PCI_BASE_1ST, &Bp1);
1472 SkPciReadCfgDWord(pAC, PCI_BASE_2ND, &Bp2);
1473 SkPciReadCfgByte(pAC, PCI_LAT_TIM, &Lat);
1474
1475 if (PciCmd != 0 || Cls != (SK_U8)0 || Lat != (SK_U8)0 ||
1476 (Bp1 & 0xfffffff0L) != 0 || Bp2 != 1) {
1477 return(1);
1478 }
1479
1480 /* Restore PCI Config Space. */
1481 for (i = 0; i < PCI_CFG_SIZE; i++) {
1482 SkPciWriteCfgDWord(pAC, i*4, ConfigSpace[i]);
1483 }
1484
1485 return(0);
1486} /* SkGePciReset */
1487
1488#endif /* SK_PCI_RESET */
1489 1384
1490/****************************************************************************** 1385/******************************************************************************
1491 * 1386 *
@@ -1524,10 +1419,6 @@ SK_IOC IoC) /* IO context */
1524 /* save CLK_RUN bits (YUKON-Lite) */ 1419 /* save CLK_RUN bits (YUKON-Lite) */
1525 SK_IN16(IoC, B0_CTST, &CtrlStat); 1420 SK_IN16(IoC, B0_CTST, &CtrlStat);
1526 1421
1527#ifdef SK_PCI_RESET
1528 (void)SkGePciReset(pAC, IoC);
1529#endif /* SK_PCI_RESET */
1530
1531 /* do the SW-reset */ 1422 /* do the SW-reset */
1532 SK_OUT8(IoC, B0_CTST, CS_RST_SET); 1423 SK_OUT8(IoC, B0_CTST, CS_RST_SET);
1533 1424
@@ -1991,11 +1882,6 @@ SK_IOC IoC) /* IO context */
1991 int i; 1882 int i;
1992 SK_U16 Word; 1883 SK_U16 Word;
1993 1884
1994#ifdef SK_PHY_LP_MODE
1995 SK_U8 Byte;
1996 SK_U16 PmCtlSts;
1997#endif /* SK_PHY_LP_MODE */
1998
1999#if (!defined(SK_SLIM) && !defined(VCPU)) 1885#if (!defined(SK_SLIM) && !defined(VCPU))
2000 /* ensure I2C is ready */ 1886 /* ensure I2C is ready */
2001 SkI2cWaitIrq(pAC, IoC); 1887 SkI2cWaitIrq(pAC, IoC);
@@ -2010,38 +1896,6 @@ SK_IOC IoC) /* IO context */
2010 } 1896 }
2011 } 1897 }
2012 1898
2013#ifdef SK_PHY_LP_MODE
2014 /*
2015 * for power saving purposes within mobile environments
2016 * we set the PHY to coma mode and switch to D3 power state.
2017 */
2018 if (pAC->GIni.GIYukonLite &&
2019 pAC->GIni.GIChipRev >= CHIP_REV_YU_LITE_A3) {
2020
2021 /* for all ports switch PHY to coma mode */
2022 for (i = 0; i < pAC->GIni.GIMacsFound; i++) {
2023
2024 SkGmEnterLowPowerMode(pAC, IoC, i, PHY_PM_DEEP_SLEEP);
2025 }
2026
2027 if (pAC->GIni.GIVauxAvail) {
2028 /* switch power to VAUX */
2029 Byte = PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF;
2030
2031 SK_OUT8(IoC, B0_POWER_CTRL, Byte);
2032 }
2033
2034 /* switch to D3 state */
2035 SK_IN16(IoC, PCI_C(PCI_PM_CTL_STS), &PmCtlSts);
2036
2037 PmCtlSts |= PCI_PM_STATE_D3;
2038
2039 SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2040
2041 SK_OUT16(IoC, PCI_C(PCI_PM_CTL_STS), PmCtlSts);
2042 }
2043#endif /* SK_PHY_LP_MODE */
2044
2045 /* Reset all bits in the PCI STATUS register */ 1899 /* Reset all bits in the PCI STATUS register */
2046 /* 1900 /*
2047 * Note: PCI Cfg cycles cannot be used, because they are not 1901 * Note: PCI Cfg cycles cannot be used, because they are not
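The removed SkGePciReset() saved the PCI configuration space, bounced the chip through the D3 and D0 power states, and restored the configuration afterwards. A minimal sketch of that sequence using the generic PCI power-management helpers (error handling trimmed; illustrative only, not the driver's own code):

	#include <linux/pci.h>

	static int example_pci_bounce(struct pci_dev *pdev)
	{
		pci_save_state(pdev);			/* keep BARs, command reg, etc. */

		if (pci_set_power_state(pdev, PCI_D3hot))	/* acts like a soft reset */
			return 1;
		if (pci_set_power_state(pdev, PCI_D0))		/* back to full power */
			return 1;

		pci_restore_state(pdev);		/* D3 -> D0 cleared the config space */
		return 0;
	}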
diff --git a/drivers/net/sk98lin/skgemib.c b/drivers/net/sk98lin/skgemib.c
index 2991bc85cf2c..0a6f67a7a395 100644
--- a/drivers/net/sk98lin/skgemib.c
+++ b/drivers/net/sk98lin/skgemib.c
@@ -871,13 +871,6 @@ PNMI_STATIC const SK_PNMI_TAB_ENTRY IdTable[] = {
871 sizeof(SK_PNMI_CONF), 871 sizeof(SK_PNMI_CONF),
872 SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPhyType), 872 SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPhyType),
873 SK_PNMI_RO, MacPrivateConf, 0}, 873 SK_PNMI_RO, MacPrivateConf, 0},
874#ifdef SK_PHY_LP_MODE
875 {OID_SKGE_PHY_LP_MODE,
876 SK_PNMI_MAC_ENTRIES,
877 sizeof(SK_PNMI_CONF),
878 SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPhyMode),
879 SK_PNMI_RW, MacPrivateConf, 0},
880#endif
881 {OID_SKGE_LINK_CAP, 874 {OID_SKGE_LINK_CAP,
882 SK_PNMI_MAC_ENTRIES, 875 SK_PNMI_MAC_ENTRIES,
883 sizeof(SK_PNMI_CONF), 876 sizeof(SK_PNMI_CONF),
diff --git a/drivers/net/sk98lin/skgepnmi.c b/drivers/net/sk98lin/skgepnmi.c
index a386172107e8..b36dd9ac6b29 100644
--- a/drivers/net/sk98lin/skgepnmi.c
+++ b/drivers/net/sk98lin/skgepnmi.c
@@ -56,10 +56,6 @@ static const char SysKonnectFileId[] =
56 * Public Function prototypes 56 * Public Function prototypes
57 */ 57 */
58int SkPnmiInit(SK_AC *pAC, SK_IOC IoC, int level); 58int SkPnmiInit(SK_AC *pAC, SK_IOC IoC, int level);
59int SkPnmiGetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void *pBuf,
60 unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex);
61int SkPnmiPreSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void *pBuf,
62 unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex);
63int SkPnmiSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void *pBuf, 59int SkPnmiSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void *pBuf,
64 unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex); 60 unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex);
65int SkPnmiGetStruct(SK_AC *pAC, SK_IOC IoC, void *pBuf, 61int SkPnmiGetStruct(SK_AC *pAC, SK_IOC IoC, void *pBuf,
@@ -587,7 +583,7 @@ int Level) /* Initialization level */
587 * exist (e.g. port instance 3 on a two port 583 * exist (e.g. port instance 3 on a two port
588 * adapter). 584 * adapter).
589 */ 585 */
590int SkPnmiGetVar( 586static int SkPnmiGetVar(
591SK_AC *pAC, /* Pointer to adapter context */ 587SK_AC *pAC, /* Pointer to adapter context */
592SK_IOC IoC, /* IO context handle */ 588SK_IOC IoC, /* IO context handle */
593SK_U32 Id, /* Object ID that is to be processed */ 589SK_U32 Id, /* Object ID that is to be processed */
@@ -629,7 +625,7 @@ SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
629 * exist (e.g. port instance 3 on a two port 625 * exist (e.g. port instance 3 on a two port
630 * adapter). 626 * adapter).
631 */ 627 */
632int SkPnmiPreSetVar( 628static int SkPnmiPreSetVar(
633SK_AC *pAC, /* Pointer to adapter context */ 629SK_AC *pAC, /* Pointer to adapter context */
634SK_IOC IoC, /* IO context handle */ 630SK_IOC IoC, /* IO context handle */
635SK_U32 Id, /* Object ID that is to be processed */ 631SK_U32 Id, /* Object ID that is to be processed */
@@ -5062,9 +5058,6 @@ SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
5062 case OID_SKGE_SPEED_CAP: 5058 case OID_SKGE_SPEED_CAP:
5063 case OID_SKGE_SPEED_MODE: 5059 case OID_SKGE_SPEED_MODE:
5064 case OID_SKGE_SPEED_STATUS: 5060 case OID_SKGE_SPEED_STATUS:
5065#ifdef SK_PHY_LP_MODE
5066 case OID_SKGE_PHY_LP_MODE:
5067#endif
5068 if (*pLen < (Limit - LogPortIndex) * sizeof(SK_U8)) { 5061 if (*pLen < (Limit - LogPortIndex) * sizeof(SK_U8)) {
5069 5062
5070 *pLen = (Limit - LogPortIndex) * sizeof(SK_U8); 5063 *pLen = (Limit - LogPortIndex) * sizeof(SK_U8);
@@ -5140,28 +5133,6 @@ SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
5140 Offset += sizeof(SK_U32); 5133 Offset += sizeof(SK_U32);
5141 break; 5134 break;
5142 5135
5143#ifdef SK_PHY_LP_MODE
5144 case OID_SKGE_PHY_LP_MODE:
5145 if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
5146 if (LogPortIndex == 0) {
5147 continue;
5148 }
5149 else {
5150 /* Get value for physical ports */
5151 PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(pAC, LogPortIndex);
5152 Val8 = (SK_U8) pAC->GIni.GP[PhysPortIndex].PPhyPowerState;
5153 *pBufPtr = Val8;
5154 }
5155 }
5156 else { /* DualNetMode */
5157
5158 Val8 = (SK_U8) pAC->GIni.GP[PhysPortIndex].PPhyPowerState;
5159 *pBufPtr = Val8;
5160 }
5161 Offset += sizeof(SK_U8);
5162 break;
5163#endif
5164
5165 case OID_SKGE_LINK_CAP: 5136 case OID_SKGE_LINK_CAP:
5166 if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ 5137 if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
5167 if (LogPortIndex == 0) { 5138 if (LogPortIndex == 0) {
@@ -5478,16 +5449,6 @@ SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
5478 } 5449 }
5479 break; 5450 break;
5480 5451
5481#ifdef SK_PHY_LP_MODE
5482 case OID_SKGE_PHY_LP_MODE:
5483 if (*pLen < Limit - LogPortIndex) {
5484
5485 *pLen = Limit - LogPortIndex;
5486 return (SK_PNMI_ERR_TOO_SHORT);
5487 }
5488 break;
5489#endif
5490
5491 case OID_SKGE_MTU: 5452 case OID_SKGE_MTU:
5492 if (*pLen < sizeof(SK_U32)) { 5453 if (*pLen < sizeof(SK_U32)) {
5493 5454
@@ -5845,116 +5806,6 @@ SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
5845 Offset += sizeof(SK_U32); 5806 Offset += sizeof(SK_U32);
5846 break; 5807 break;
5847 5808
5848#ifdef SK_PHY_LP_MODE
5849 case OID_SKGE_PHY_LP_MODE:
5850 /* The preset ends here */
5851 if (Action == SK_PNMI_PRESET) {
5852
5853 return (SK_PNMI_ERR_OK);
5854 }
5855
5856 if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
5857 if (LogPortIndex == 0) {
5858 Offset = 0;
5859 continue;
5860 }
5861 else {
5862 /* Set value for physical ports */
5863 PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(pAC, LogPortIndex);
5864
5865 switch (*(pBuf + Offset)) {
5866 case 0:
5867 /* If LowPowerMode is active, we can leave it. */
5868 if (pAC->GIni.GP[PhysPortIndex].PPhyPowerState) {
5869
5870 Val32 = SkGmLeaveLowPowerMode(pAC, IoC, PhysPortIndex);
5871
5872 if (pAC->GIni.GP[PhysPortIndex].PPhyPowerState < 3) {
5873
5874 SkDrvInitAdapter(pAC);
5875 }
5876 break;
5877 }
5878 else {
5879 *pLen = 0;
5880 return (SK_PNMI_ERR_GENERAL);
5881 }
5882 case 1:
5883 case 2:
5884 case 3:
5885 case 4:
5886 /* If no LowPowerMode is active, we can enter it. */
5887 if (!pAC->GIni.GP[PhysPortIndex].PPhyPowerState) {
5888
5889 if ((*(pBuf + Offset)) < 3) {
5890
5891 SkDrvDeInitAdapter(pAC);
5892 }
5893
5894 Val32 = SkGmEnterLowPowerMode(pAC, IoC, PhysPortIndex, *pBuf);
5895 break;
5896 }
5897 else {
5898 *pLen = 0;
5899 return (SK_PNMI_ERR_GENERAL);
5900 }
5901 default:
5902 *pLen = 0;
5903 return (SK_PNMI_ERR_BAD_VALUE);
5904 }
5905 }
5906 }
5907 else { /* DualNetMode */
5908
5909 switch (*(pBuf + Offset)) {
5910 case 0:
5911 /* If we are in a LowPowerMode, we can leave it. */
5912 if (pAC->GIni.GP[PhysPortIndex].PPhyPowerState) {
5913
5914 Val32 = SkGmLeaveLowPowerMode(pAC, IoC, PhysPortIndex);
5915
5916 if (pAC->GIni.GP[PhysPortIndex].PPhyPowerState < 3) {
5917
5918 SkDrvInitAdapter(pAC);
5919 }
5920 break;
5921 }
5922 else {
5923 *pLen = 0;
5924 return (SK_PNMI_ERR_GENERAL);
5925 }
5926
5927 case 1:
5928 case 2:
5929 case 3:
5930 case 4:
5931 /* If we are not already in LowPowerMode, we can enter it. */
5932 if (!pAC->GIni.GP[PhysPortIndex].PPhyPowerState) {
5933
5934 if ((*(pBuf + Offset)) < 3) {
5935
5936 SkDrvDeInitAdapter(pAC);
5937 }
5938 else {
5939
5940 Val32 = SkGmEnterLowPowerMode(pAC, IoC, PhysPortIndex, *pBuf);
5941 }
5942 break;
5943 }
5944 else {
5945 *pLen = 0;
5946 return (SK_PNMI_ERR_GENERAL);
5947 }
5948
5949 default:
5950 *pLen = 0;
5951 return (SK_PNMI_ERR_BAD_VALUE);
5952 }
5953 }
5954 Offset += sizeof(SK_U8);
5955 break;
5956#endif
5957
5958 default: 5809 default:
5959 SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_ERR, 5810 SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_ERR,
5960 ("MacPrivateConf: Unknown OID should be handled before set")); 5811 ("MacPrivateConf: Unknown OID should be handled before set"));
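The get/preset/set handlers above negotiate buffer sizes through *pLen: when the caller's buffer is too small they write the required length back and return a "too short" code. A simplified, self-contained sketch of that convention (names and error values are placeholders, not the driver's):

	#include <string.h>

	#define EX_ERR_OK        0
	#define EX_ERR_TOO_SHORT 1

	static int get_u32_oid(unsigned char *pBuf, unsigned int *pLen, unsigned int value)
	{
		if (*pLen < sizeof(value)) {
			*pLen = sizeof(value);	/* tell the caller how many bytes are needed */
			return EX_ERR_TOO_SHORT;
		}

		memcpy(pBuf, &value, sizeof(value));
		*pLen = sizeof(value);		/* report how many bytes were written */
		return EX_ERR_OK;
	}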
diff --git a/drivers/net/sk98lin/skgesirq.c b/drivers/net/sk98lin/skgesirq.c
index 87520f0057d7..ab66d80a4455 100644
--- a/drivers/net/sk98lin/skgesirq.c
+++ b/drivers/net/sk98lin/skgesirq.c
@@ -265,7 +265,7 @@ int Port) /* Port Index (MAC_1 + n) */
265 * 265 *
266 * Returns: N/A 266 * Returns: N/A
267 */ 267 */
268void SkHWLinkUp( 268static void SkHWLinkUp(
269SK_AC *pAC, /* adapter context */ 269SK_AC *pAC, /* adapter context */
270SK_IOC IoC, /* IO context */ 270SK_IOC IoC, /* IO context */
271int Port) /* Port Index (MAC_1 + n) */ 271int Port) /* Port Index (MAC_1 + n) */
@@ -612,14 +612,6 @@ SK_U32 Istatus) /* Interrupt status word */
612 * we ignore those 612 * we ignore those
613 */ 613 */
614 pPrt->HalfDupTimerActive = SK_TRUE; 614 pPrt->HalfDupTimerActive = SK_TRUE;
615#ifdef XXX
616 Len = sizeof(SK_U64);
617 SkPnmiGetVar(pAC, IoC, OID_SKGE_STAT_TX_OCTETS, (char *)&Octets,
618 &Len, (SK_U32)SK_PNMI_PORT_PHYS2INST(pAC, 0),
619 pAC->Rlmt.Port[0].Net->NetNumber);
620
621 pPrt->LastOctets = Octets;
622#endif /* XXX */
623 /* Snap statistic counters */ 615 /* Snap statistic counters */
624 (void)SkXmUpdateStats(pAC, IoC, 0); 616 (void)SkXmUpdateStats(pAC, IoC, 0);
625 617
@@ -653,14 +645,6 @@ SK_U32 Istatus) /* Interrupt status word */
653 pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOHALF) && 645 pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOHALF) &&
654 !pPrt->HalfDupTimerActive) { 646 !pPrt->HalfDupTimerActive) {
655 pPrt->HalfDupTimerActive = SK_TRUE; 647 pPrt->HalfDupTimerActive = SK_TRUE;
656#ifdef XXX
657 Len = sizeof(SK_U64);
658 SkPnmiGetVar(pAC, IoC, OID_SKGE_STAT_TX_OCTETS, (char *)&Octets,
659 &Len, (SK_U32)SK_PNMI_PORT_PHYS2INST(pAC, 1),
660 pAC->Rlmt.Port[1].Net->NetNumber);
661
662 pPrt->LastOctets = Octets;
663#endif /* XXX */
664 /* Snap statistic counters */ 648 /* Snap statistic counters */
665 (void)SkXmUpdateStats(pAC, IoC, 1); 649 (void)SkXmUpdateStats(pAC, IoC, 1);
666 650
@@ -2085,12 +2069,6 @@ SK_EVPARA Para) /* Event specific Parameter */
2085 pPrt->HalfDupTimerActive = SK_FALSE; 2069 pPrt->HalfDupTimerActive = SK_FALSE;
2086 if (pPrt->PLinkModeStatus == SK_LMODE_STAT_HALF || 2070 if (pPrt->PLinkModeStatus == SK_LMODE_STAT_HALF ||
2087 pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOHALF) { 2071 pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOHALF) {
2088#ifdef XXX
2089 Len = sizeof(SK_U64);
2090 SkPnmiGetVar(pAC, IoC, OID_SKGE_STAT_TX_OCTETS, (char *)&Octets,
2091 &Len, (SK_U32)SK_PNMI_PORT_PHYS2INST(pAC, Port),
2092 pAC->Rlmt.Port[Port].Net->NetNumber);
2093#endif /* XXX */
2094 /* Snap statistic counters */ 2072 /* Snap statistic counters */
2095 (void)SkXmUpdateStats(pAC, IoC, Port); 2073 (void)SkXmUpdateStats(pAC, IoC, Port);
2096 2074
diff --git a/drivers/net/sk98lin/ski2c.c b/drivers/net/sk98lin/ski2c.c
index 075a0464e56b..79bf57cb5326 100644
--- a/drivers/net/sk98lin/ski2c.c
+++ b/drivers/net/sk98lin/ski2c.c
@@ -396,7 +396,7 @@ int Rw) /* Read / Write Flag */
396 * 1: error, transfer does not complete, I2C transfer 396 * 1: error, transfer does not complete, I2C transfer
397 * killed, wait loop terminated. 397 * killed, wait loop terminated.
398 */ 398 */
399int SkI2cWait( 399static int SkI2cWait(
400SK_AC *pAC, /* Adapter Context */ 400SK_AC *pAC, /* Adapter Context */
401SK_IOC IoC, /* I/O Context */ 401SK_IOC IoC, /* I/O Context */
402int Event) /* complete event to wait for (I2C_READ or I2C_WRITE) */ 402int Event) /* complete event to wait for (I2C_READ or I2C_WRITE) */
@@ -481,7 +481,7 @@ SK_IOC IoC) /* I/O Context */
481 * returns 0: success 481 * returns 0: success
482 * 1: error 482 * 1: error
483 */ 483 */
484int SkI2cWrite( 484static int SkI2cWrite(
485SK_AC *pAC, /* Adapter Context */ 485SK_AC *pAC, /* Adapter Context */
486SK_IOC IoC, /* I/O Context */ 486SK_IOC IoC, /* I/O Context */
487SK_U32 I2cData, /* I2C Data to write */ 487SK_U32 I2cData, /* I2C Data to write */
@@ -538,7 +538,7 @@ int I2cBurst) /* I2C Burst Flag */
538 * 1 if the read is completed 538 * 1 if the read is completed
539 * 0 if the read must be continued (I2C Bus still allocated) 539 * 0 if the read must be continued (I2C Bus still allocated)
540 */ 540 */
541int SkI2cReadSensor( 541static int SkI2cReadSensor(
542SK_AC *pAC, /* Adapter Context */ 542SK_AC *pAC, /* Adapter Context */
543SK_IOC IoC, /* I/O Context */ 543SK_IOC IoC, /* I/O Context */
544SK_SENSOR *pSen) /* Sensor to be read */ 544SK_SENSOR *pSen) /* Sensor to be read */
diff --git a/drivers/net/sk98lin/sklm80.c b/drivers/net/sk98lin/sklm80.c
index 68292d18175b..a204f5bb55d4 100644
--- a/drivers/net/sk98lin/sklm80.c
+++ b/drivers/net/sk98lin/sklm80.c
@@ -34,79 +34,7 @@ static const char SysKonnectFileId[] =
34#include "h/lm80.h" 34#include "h/lm80.h"
35#include "h/skdrv2nd.h" /* Adapter Control- and Driver specific Def. */ 35#include "h/skdrv2nd.h" /* Adapter Control- and Driver specific Def. */
36 36
37#ifdef SK_DIAG
38#define BREAK_OR_WAIT(pAC,IoC,Event) SkI2cWait(pAC,IoC,Event)
39#else /* nSK_DIAG */
40#define BREAK_OR_WAIT(pAC,IoC,Event) break 37#define BREAK_OR_WAIT(pAC,IoC,Event) break
41#endif /* nSK_DIAG */
42
43#ifdef SK_DIAG
44/*
45 * read the register 'Reg' from the device 'Dev'
46 *
47 * return read error -1
48 * success the read value
49 */
50int SkLm80RcvReg(
51SK_IOC IoC, /* Adapter Context */
52int Dev, /* I2C device address */
53int Reg) /* register to read */
54{
55 int Val = 0;
56 int TempExt;
57
58 /* Signal device number */
59 if (SkI2cSndDev(IoC, Dev, I2C_WRITE)) {
60 return(-1);
61 }
62
63 if (SkI2cSndByte(IoC, Reg)) {
64 return(-1);
65 }
66
67 /* repeat start */
68 if (SkI2cSndDev(IoC, Dev, I2C_READ)) {
69 return(-1);
70 }
71
72 switch (Reg) {
73 case LM80_TEMP_IN:
74 Val = (int)SkI2cRcvByte(IoC, 1);
75
76 /* First: correct the value: it might be negative */
77 if ((Val & 0x80) != 0) {
78 /* Value is negative */
79 Val = Val - 256;
80 }
81 Val = Val * SK_LM80_TEMP_LSB;
82 SkI2cStop(IoC);
83
84 TempExt = (int)SkLm80RcvReg(IoC, LM80_ADDR, LM80_TEMP_CTRL);
85
86 if (Val > 0) {
87 Val += ((TempExt >> 7) * SK_LM80_TEMPEXT_LSB);
88 }
89 else {
90 Val -= ((TempExt >> 7) * SK_LM80_TEMPEXT_LSB);
91 }
92 return(Val);
93 break;
94 case LM80_VT0_IN:
95 case LM80_VT1_IN:
96 case LM80_VT2_IN:
97 case LM80_VT3_IN:
98 Val = (int)SkI2cRcvByte(IoC, 1) * SK_LM80_VT_LSB;
99 break;
100
101 default:
102 Val = (int)SkI2cRcvByte(IoC, 1);
103 break;
104 }
105
106 SkI2cStop(IoC);
107 return(Val);
108}
109#endif /* SK_DIAG */
110 38
111/* 39/*
112 * read a sensors value (LM80 specific) 40 * read a sensors value (LM80 specific)
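The removed SkLm80RcvReg() widened the LM80's signed 8-bit temperature reading by hand ("if (Val & 0x80) Val -= 256;") before scaling it. A standalone illustration of that sign handling (the LSB scale factor here is an assumption, not the driver's constant):

	#include <stdio.h>

	#define TEMP_LSB 10	/* assume 1 LSB = 1 degree, reported in tenths */

	int main(void)
	{
		int raw = 0xF6;		/* example 8-bit register value as read */

		if (raw & 0x80)		/* sign bit set -> negative temperature */
			raw -= 256;	/* manual sign extension: 0xF6 -> -10 */

		printf("temperature = %d (tenths of a degree)\n", raw * TEMP_LSB);
		return 0;
	}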
diff --git a/drivers/net/sk98lin/skrlmt.c b/drivers/net/sk98lin/skrlmt.c
index 9ea11ab2296a..be8d1ccddf6d 100644
--- a/drivers/net/sk98lin/skrlmt.c
+++ b/drivers/net/sk98lin/skrlmt.c
@@ -282,7 +282,6 @@ typedef struct s_SpTreeRlmtPacket {
282 282
283SK_MAC_ADDR SkRlmtMcAddr = {{0x01, 0x00, 0x5A, 0x52, 0x4C, 0x4D}}; 283SK_MAC_ADDR SkRlmtMcAddr = {{0x01, 0x00, 0x5A, 0x52, 0x4C, 0x4D}};
284SK_MAC_ADDR BridgeMcAddr = {{0x01, 0x80, 0xC2, 0x00, 0x00, 0x00}}; 284SK_MAC_ADDR BridgeMcAddr = {{0x01, 0x80, 0xC2, 0x00, 0x00, 0x00}};
285SK_MAC_ADDR BcAddr = {{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}};
286 285
287/* local variables ************************************************************/ 286/* local variables ************************************************************/
288 287
diff --git a/drivers/net/sk98lin/skvpd.c b/drivers/net/sk98lin/skvpd.c
index eb3c8988ced1..17786056c66a 100644
--- a/drivers/net/sk98lin/skvpd.c
+++ b/drivers/net/sk98lin/skvpd.c
@@ -132,65 +132,6 @@ int addr) /* VPD address */
132 132
133#endif /* SKDIAG */ 133#endif /* SKDIAG */
134 134
135#if 0
136
137/*
138 Write the dword 'data' at address 'addr' into the VPD EEPROM, and
139 verify that the data is written.
140
141 Needed Time:
142
143. MIN MAX
144. -------------------------------------------------------------------
145. write 1.8 ms 3.6 ms
146. internal write cycles 0.7 ms 7.0 ms
147. -------------------------------------------------------------------
148. over all program time 2.5 ms 10.6 ms
149. read 1.3 ms 2.6 ms
150. -------------------------------------------------------------------
151. over all 3.8 ms 13.2 ms
152.
153
154
155 Returns 0: success
156 1: error, I2C transfer does not terminate
157 2: error, data verify error
158
159 */
160static int VpdWriteDWord(
161SK_AC *pAC, /* pAC pointer */
162SK_IOC IoC, /* IO Context */
163int addr, /* VPD address */
164SK_U32 data) /* VPD data to write */
165{
166 /* start VPD write */
167 /* Don't swap here, it's a data stream of bytes */
168 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL,
169 ("VPD write dword at addr 0x%x, data = 0x%x\n",addr,data));
170 VPD_OUT32(pAC, IoC, PCI_VPD_DAT_REG, (SK_U32)data);
171 /* But do it here */
172 addr |= VPD_WRITE;
173
174 VPD_OUT16(pAC, IoC, PCI_VPD_ADR_REG, (SK_U16)(addr | VPD_WRITE));
175
176 /* this may take up to 10.6 ms */
177 if (VpdWait(pAC, IoC, VPD_WRITE)) {
178 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
179 ("Write Timed Out\n"));
180 return(1);
181 };
182
183 /* verify data */
184 if (VpdReadDWord(pAC, IoC, addr) != data) {
185 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR | SK_DBGCAT_FATAL,
186 ("Data Verify Error\n"));
187 return(2);
188 }
189 return(0);
190} /* VpdWriteDWord */
191
192#endif /* 0 */
193
194/* 135/*
195 * Read one Stream of 'len' bytes of VPD data, starting at 'addr' from 136 * Read one Stream of 'len' bytes of VPD data, starting at 'addr' from
196 * or to the I2C EEPROM. 137 * or to the I2C EEPROM.
@@ -728,7 +669,7 @@ char *etp) /* end pointer input position */
728 * 6: fatal VPD error 669 * 6: fatal VPD error
729 * 670 *
730 */ 671 */
731int VpdSetupPara( 672static int VpdSetupPara(
732SK_AC *pAC, /* common data base */ 673SK_AC *pAC, /* common data base */
733const char *key, /* keyword to insert */ 674const char *key, /* keyword to insert */
734const char *buf, /* buffer with the keyword value */ 675const char *buf, /* buffer with the keyword value */
@@ -1148,50 +1089,3 @@ SK_IOC IoC) /* IO Context */
1148 return(0); 1089 return(0);
1149} 1090}
1150 1091
1151
1152
1153/*
1154 * Read the contents of the VPD EEPROM and copy it to the VPD buffer
1155 * if not already done. If the keyword "VF" is not present it will be
1156 * created and the error log message will be stored to this keyword.
1157 * If "VF" is already present the error log message will be stored to the
1158 * keyword "VL" instead; "VL" will be created or overwritten in that case.
1159 * The VPD read/write area is saved to the VPD EEPROM.
1160 *
1161 * returns nothing, errors will be ignored.
1162 */
1163void VpdErrLog(
1164SK_AC *pAC, /* common data base */
1165SK_IOC IoC, /* IO Context */
1166char *msg) /* error log message */
1167{
1168 SK_VPD_PARA *v, vf; /* VF */
1169 int len;
1170
1171 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX,
1172 ("VPD error log msg %s\n", msg));
1173 if ((pAC->vpd.v.vpd_status & VPD_VALID) == 0) {
1174 if (VpdInit(pAC, IoC) != 0) {
1175 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
1176 ("VPD init error\n"));
1177 return;
1178 }
1179 }
1180
1181 len = strlen(msg);
1182 if (len > VPD_MAX_LEN) {
1183 /* cut it */
1184 len = VPD_MAX_LEN;
1185 }
1186 if ((v = vpd_find_para(pAC, VPD_VF, &vf)) != NULL) {
1187 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX, ("overwrite VL\n"));
1188 (void)VpdSetupPara(pAC, VPD_VL, msg, len, VPD_RW_KEY, OWR_KEY);
1189 }
1190 else {
1191 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX, ("write VF\n"));
1192 (void)VpdSetupPara(pAC, VPD_VF, msg, len, VPD_RW_KEY, ADD_KEY);
1193 }
1194
1195 (void)VpdUpdate(pAC, IoC);
1196}
1197
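The comment on the removed VpdWriteDWord() describes a write-then-verify cycle: start the write, wait for the EEPROM to finish, then read the word back and compare. A generic sketch of that pattern with placeholder accessors (not the driver's VPD_OUT32/VpdWait/VpdReadDWord):

	/* assumed hardware accessors -- placeholders for illustration only */
	extern void eeprom_write32(void *dev, int addr, unsigned int data);
	extern int  eeprom_wait_done(void *dev);
	extern unsigned int eeprom_read32(void *dev, int addr);

	static int write_verify_u32(void *dev, int addr, unsigned int data)
	{
		eeprom_write32(dev, addr, data);	/* start the write cycle */

		if (eeprom_wait_done(dev))		/* may take several ms */
			return 1;			/* write did not terminate */

		if (eeprom_read32(dev, addr) != data)
			return 2;			/* verify error */

		return 0;				/* success */
	}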
diff --git a/drivers/net/sk98lin/skxmac2.c b/drivers/net/sk98lin/skxmac2.c
index 42d2d963150a..b4e75022a657 100644
--- a/drivers/net/sk98lin/skxmac2.c
+++ b/drivers/net/sk98lin/skxmac2.c
@@ -41,13 +41,13 @@ static const char SysKonnectFileId[] =
41#endif 41#endif
42 42
43#ifdef GENESIS 43#ifdef GENESIS
44BCOM_HACK BcomRegA1Hack[] = { 44static BCOM_HACK BcomRegA1Hack[] = {
45 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 }, 45 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
46 { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 }, 46 { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
47 { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 }, 47 { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
48 { 0, 0 } 48 { 0, 0 }
49}; 49};
50BCOM_HACK BcomRegC0Hack[] = { 50static BCOM_HACK BcomRegC0Hack[] = {
51 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 }, { 0x17, 0x0013 }, 51 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 }, { 0x17, 0x0013 },
52 { 0x15, 0x0A04 }, { 0x18, 0x0420 }, 52 { 0x15, 0x0A04 }, { 0x18, 0x0420 },
53 { 0, 0 } 53 { 0, 0 }
@@ -790,7 +790,7 @@ int Port) /* Port Index (MAC_1 + n) */
790 * Returns: 790 * Returns:
791 * nothing 791 * nothing
792 */ 792 */
793void SkMacFlushRxFifo( 793static void SkMacFlushRxFifo(
794SK_AC *pAC, /* adapter context */ 794SK_AC *pAC, /* adapter context */
795SK_IOC IoC, /* IO context */ 795SK_IOC IoC, /* IO context */
796int Port) /* Port Index (MAC_1 + n) */ 796int Port) /* Port Index (MAC_1 + n) */
@@ -1231,38 +1231,6 @@ int Port) /* Port Index (MAC_1 + n) */
1231} /* SkMacHardRst */ 1231} /* SkMacHardRst */
1232 1232
1233 1233
1234/******************************************************************************
1235 *
1236 * SkMacClearRst() - Clear the MAC reset
1237 *
1238 * Description: calls a clear MAC reset routine dep. on board type
1239 *
1240 * Returns:
1241 * nothing
1242 */
1243void SkMacClearRst(
1244SK_AC *pAC, /* adapter context */
1245SK_IOC IoC, /* IO context */
1246int Port) /* Port Index (MAC_1 + n) */
1247{
1248
1249#ifdef GENESIS
1250 if (pAC->GIni.GIGenesis) {
1251
1252 SkXmClearRst(pAC, IoC, Port);
1253 }
1254#endif /* GENESIS */
1255
1256#ifdef YUKON
1257 if (pAC->GIni.GIYukon) {
1258
1259 SkGmClearRst(pAC, IoC, Port);
1260 }
1261#endif /* YUKON */
1262
1263} /* SkMacClearRst */
1264
1265
1266#ifdef GENESIS 1234#ifdef GENESIS
1267/****************************************************************************** 1235/******************************************************************************
1268 * 1236 *
@@ -1713,7 +1681,7 @@ int Port) /* Port Index (MAC_1 + n) */
1713 * Returns: 1681 * Returns:
1714 * nothing 1682 * nothing
1715 */ 1683 */
1716void SkXmInitDupMd( 1684static void SkXmInitDupMd(
1717SK_AC *pAC, /* adapter context */ 1685SK_AC *pAC, /* adapter context */
1718SK_IOC IoC, /* IO context */ 1686SK_IOC IoC, /* IO context */
1719int Port) /* Port Index (MAC_1 + n) */ 1687int Port) /* Port Index (MAC_1 + n) */
@@ -1761,7 +1729,7 @@ int Port) /* Port Index (MAC_1 + n) */
1761 * Returns: 1729 * Returns:
1762 * nothing 1730 * nothing
1763 */ 1731 */
1764void SkXmInitPauseMd( 1732static void SkXmInitPauseMd(
1765SK_AC *pAC, /* adapter context */ 1733SK_AC *pAC, /* adapter context */
1766SK_IOC IoC, /* IO context */ 1734SK_IOC IoC, /* IO context */
1767int Port) /* Port Index (MAC_1 + n) */ 1735int Port) /* Port Index (MAC_1 + n) */
@@ -2076,283 +2044,7 @@ SK_BOOL DoLoop) /* Should a Phy LoopBack be set-up? */
2076} /* SkXmInitPhyBcom */ 2044} /* SkXmInitPhyBcom */
2077#endif /* GENESIS */ 2045#endif /* GENESIS */
2078 2046
2079
2080#ifdef YUKON 2047#ifdef YUKON
2081#ifndef SK_SLIM
2082/******************************************************************************
2083 *
2084 * SkGmEnterLowPowerMode()
2085 *
2086 * Description:
2087 * This function sets the Marvell Alaska PHY to the low power mode
2088 * given by parameter mode.
2089 * The following low power modes are available:
2090 *
2091 * - Coma Mode (Deep Sleep):
2092 * Power consumption: ~15 - 30 mW
2093 * The PHY cannot wake up on its own.
2094 *
2095 * - IEEE 22.2.4.1.5 compatible power down mode
2096 * Power consumption: ~240 mW
2097 * The PHY cannot wake up on its own.
2098 *
2099 * - energy detect mode
2100 * Power consumption: ~160 mW
2101 * The PHY can wake up on its own by detecting activity
2102 * on the CAT 5 cable.
2103 *
2104 * - energy detect plus mode
2105 * Power consumption: ~150 mW
2106 * The PHY can wake up on its own by detecting activity
2107 * on the CAT 5 cable.
2108 * Connected devices can be woken up by sending normal link
2109 * pulses every one second.
2110 *
2111 * Note:
2112 *
2113 * Returns:
2114 * 0: ok
2115 * 1: error
2116 */
2117int SkGmEnterLowPowerMode(
2118SK_AC *pAC, /* adapter context */
2119SK_IOC IoC, /* IO context */
2120int Port, /* Port Index (e.g. MAC_1) */
2121SK_U8 Mode) /* low power mode */
2122{
2123 SK_U16 Word;
2124 SK_U32 DWord;
2125 SK_U8 LastMode;
2126 int Ret = 0;
2127
2128 if (pAC->GIni.GIYukonLite &&
2129 pAC->GIni.GIChipRev >= CHIP_REV_YU_LITE_A3) {
2130
2131 /* save current power mode */
2132 LastMode = pAC->GIni.GP[Port].PPhyPowerState;
2133 pAC->GIni.GP[Port].PPhyPowerState = Mode;
2134
2135 switch (Mode) {
2136 /* coma mode (deep sleep) */
2137 case PHY_PM_DEEP_SLEEP:
2138 /* setup General Purpose Control Register */
2139 GM_OUT16(IoC, 0, GM_GP_CTRL, GM_GPCR_FL_PASS |
2140 GM_GPCR_SPEED_100 | GM_GPCR_AU_ALL_DIS);
2141
2142 /* apply COMA mode workaround */
2143 SkGmPhyWrite(pAC, IoC, Port, 29, 0x001f);
2144 SkGmPhyWrite(pAC, IoC, Port, 30, 0xfff3);
2145
2146 SK_IN32(IoC, PCI_C(PCI_OUR_REG_1), &DWord);
2147
2148 SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2149
2150 /* Set PHY to Coma Mode */
2151 SK_OUT32(IoC, PCI_C(PCI_OUR_REG_1), DWord | PCI_PHY_COMA);
2152
2153 SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2154
2155 break;
2156
2157 /* IEEE 22.2.4.1.5 compatible power down mode */
2158 case PHY_PM_IEEE_POWER_DOWN:
2159 /*
2160 * - disable MAC 125 MHz clock
2161 * - allow MAC power down
2162 */
2163 SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word);
2164 Word |= PHY_M_PC_DIS_125CLK;
2165 Word &= ~PHY_M_PC_MAC_POW_UP;
2166 SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word);
2167
2168 /*
2169 * register changes must be followed by a software
2170 * reset to take effect
2171 */
2172 SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CTRL, &Word);
2173 Word |= PHY_CT_RESET;
2174 SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CTRL, Word);
2175
2176 /* switch IEEE compatible power down mode on */
2177 SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CTRL, &Word);
2178 Word |= PHY_CT_PDOWN;
2179 SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CTRL, Word);
2180 break;
2181
2182 /* energy detect and energy detect plus mode */
2183 case PHY_PM_ENERGY_DETECT:
2184 case PHY_PM_ENERGY_DETECT_PLUS:
2185 /*
2186 * - disable MAC 125 MHz clock
2187 */
2188 SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word);
2189 Word |= PHY_M_PC_DIS_125CLK;
2190 SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word);
2191
2192 /* activate energy detect mode 1 */
2193 SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word);
2194
2195 /* energy detect mode */
2196 if (Mode == PHY_PM_ENERGY_DETECT) {
2197 Word |= PHY_M_PC_EN_DET;
2198 }
2199 /* energy detect plus mode */
2200 else {
2201 Word |= PHY_M_PC_EN_DET_PLUS;
2202 }
2203
2204 SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word);
2205
2206 /*
2207 * reinitialize the PHY to force a software reset
2208 * which is necessary after the register settings
2209 * for the energy detect modes.
2210 * Furthermore, reinitialisation prevents the
2211 * PHY from drifting out of a stable state.
2212 */
2213 SkGmInitPhyMarv(pAC, IoC, Port, SK_FALSE);
2214 break;
2215
2216 /* don't change current power mode */
2217 default:
2218 pAC->GIni.GP[Port].PPhyPowerState = LastMode;
2219 Ret = 1;
2220 break;
2221 }
2222 }
2223 /* low power modes are not supported by this chip */
2224 else {
2225 Ret = 1;
2226 }
2227
2228 return(Ret);
2229
2230} /* SkGmEnterLowPowerMode */
2231
2232/******************************************************************************
2233 *
2234 * SkGmLeaveLowPowerMode()
2235 *
2236 * Description:
2237 * Leave the current low power mode and switch to normal mode
2238 *
2239 * Note:
2240 *
2241 * Returns:
2242 * 0: ok
2243 * 1: error
2244 */
2245int SkGmLeaveLowPowerMode(
2246SK_AC *pAC, /* adapter context */
2247SK_IOC IoC, /* IO context */
2248int Port) /* Port Index (e.g. MAC_1) */
2249{
2250 SK_U32 DWord;
2251 SK_U16 Word;
2252 SK_U8 LastMode;
2253 int Ret = 0;
2254
2255 if (pAC->GIni.GIYukonLite &&
2256 pAC->GIni.GIChipRev >= CHIP_REV_YU_LITE_A3) {
2257
2258 /* save current power mode */
2259 LastMode = pAC->GIni.GP[Port].PPhyPowerState;
2260 pAC->GIni.GP[Port].PPhyPowerState = PHY_PM_OPERATIONAL_MODE;
2261
2262 switch (LastMode) {
2263 /* coma mode (deep sleep) */
2264 case PHY_PM_DEEP_SLEEP:
2265 SK_IN32(IoC, PCI_C(PCI_OUR_REG_1), &DWord);
2266
2267 SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2268
2269 /* Release PHY from Coma Mode */
2270 SK_OUT32(IoC, PCI_C(PCI_OUR_REG_1), DWord & ~PCI_PHY_COMA);
2271
2272 SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2273
2274 SK_IN32(IoC, B2_GP_IO, &DWord);
2275
2276 /* set to output */
2277 DWord |= (GP_DIR_9 | GP_IO_9);
2278
2279 /* set PHY reset */
2280 SK_OUT32(IoC, B2_GP_IO, DWord);
2281
2282 DWord &= ~GP_IO_9; /* clear PHY reset (active high) */
2283
2284 /* clear PHY reset */
2285 SK_OUT32(IoC, B2_GP_IO, DWord);
2286 break;
2287
2288 /* IEEE 22.2.4.1.5 compatible power down mode */
2289 case PHY_PM_IEEE_POWER_DOWN:
2290 /*
2291 * - enable MAC 125 MHz clock
2292 * - set MAC power up
2293 */
2294 SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word);
2295 Word &= ~PHY_M_PC_DIS_125CLK;
2296 Word |= PHY_M_PC_MAC_POW_UP;
2297 SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word);
2298
2299 /*
2300 * register changes must be followed by a software
2301 * reset to take effect
2302 */
2303 SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CTRL, &Word);
2304 Word |= PHY_CT_RESET;
2305 SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CTRL, Word);
2306
2307 /* switch IEEE compatible power down mode off */
2308 SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CTRL, &Word);
2309 Word &= ~PHY_CT_PDOWN;
2310 SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CTRL, Word);
2311 break;
2312
2313 /* energy detect and energy detect plus mode */
2314 case PHY_PM_ENERGY_DETECT:
2315 case PHY_PM_ENERGY_DETECT_PLUS:
2316 /*
2317 * - enable MAC 125 MHz clock
2318 */
2319 SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word);
2320 Word &= ~PHY_M_PC_DIS_125CLK;
2321 SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word);
2322
2323 /* disable energy detect mode */
2324 SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word);
2325 Word &= ~PHY_M_PC_EN_DET_MSK;
2326 SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word);
2327
2328 /*
2329 * reinitialize the PHY to force a software reset
2330 * which is necessary after the register settings
2331 * for the energy detect modes.
2332 * Furthermore, reinitialisation prevents the
2333 * PHY from drifting out of a stable state.
2334 */
2335 SkGmInitPhyMarv(pAC, IoC, Port, SK_FALSE);
2336 break;
2337
2338 /* don't change current power mode */
2339 default:
2340 pAC->GIni.GP[Port].PPhyPowerState = LastMode;
2341 Ret = 1;
2342 break;
2343 }
2344 }
2345 /* low power modes are not supported by this chip */
2346 else {
2347 Ret = 1;
2348 }
2349
2350 return(Ret);
2351
2352} /* SkGmLeaveLowPowerMode */
2353#endif /* !SK_SLIM */
2354
2355
2356/****************************************************************************** 2048/******************************************************************************
2357 * 2049 *
2358 * SkGmInitPhyMarv() - Initialize the Marvell Phy registers 2050 * SkGmInitPhyMarv() - Initialize the Marvell Phy registers
@@ -3420,145 +3112,6 @@ int Port) /* Port Index (MAC_1 + n) */
3420} /* SkMacAutoNegDone */ 3112} /* SkMacAutoNegDone */
3421 3113
3422 3114
3423#ifdef GENESIS
3424/******************************************************************************
3425 *
3426 * SkXmSetRxTxEn() - Special Set Rx/Tx Enable and some features in XMAC
3427 *
3428 * Description:
3429 * sets MAC or PHY LoopBack and Duplex Mode in the MMU Command Reg.
3430 * enables Rx/Tx
3431 *
3432 * Returns: N/A
3433 */
3434static void SkXmSetRxTxEn(
3435SK_AC *pAC, /* Adapter Context */
3436SK_IOC IoC, /* IO context */
3437int Port, /* Port Index (MAC_1 + n) */
3438int Para) /* Parameter to set: MAC or PHY LoopBack, Duplex Mode */
3439{
3440 SK_U16 Word;
3441
3442 XM_IN16(IoC, Port, XM_MMU_CMD, &Word);
3443
3444 switch (Para & (SK_MAC_LOOPB_ON | SK_MAC_LOOPB_OFF)) {
3445 case SK_MAC_LOOPB_ON:
3446 Word |= XM_MMU_MAC_LB;
3447 break;
3448 case SK_MAC_LOOPB_OFF:
3449 Word &= ~XM_MMU_MAC_LB;
3450 break;
3451 }
3452
3453 switch (Para & (SK_PHY_LOOPB_ON | SK_PHY_LOOPB_OFF)) {
3454 case SK_PHY_LOOPB_ON:
3455 Word |= XM_MMU_GMII_LOOP;
3456 break;
3457 case SK_PHY_LOOPB_OFF:
3458 Word &= ~XM_MMU_GMII_LOOP;
3459 break;
3460 }
3461
3462 switch (Para & (SK_PHY_FULLD_ON | SK_PHY_FULLD_OFF)) {
3463 case SK_PHY_FULLD_ON:
3464 Word |= XM_MMU_GMII_FD;
3465 break;
3466 case SK_PHY_FULLD_OFF:
3467 Word &= ~XM_MMU_GMII_FD;
3468 break;
3469 }
3470
3471 XM_OUT16(IoC, Port, XM_MMU_CMD, Word | XM_MMU_ENA_RX | XM_MMU_ENA_TX);
3472
3473 /* dummy read to ensure writing */
3474 XM_IN16(IoC, Port, XM_MMU_CMD, &Word);
3475
3476} /* SkXmSetRxTxEn */
3477#endif /* GENESIS */
3478
3479
3480#ifdef YUKON
3481/******************************************************************************
3482 *
3483 * SkGmSetRxTxEn() - Special Set Rx/Tx Enable and some features in GMAC
3484 *
3485 * Description:
3486 * sets MAC LoopBack and Duplex Mode in the General Purpose Control Reg.
3487 * enables Rx/Tx
3488 *
3489 * Returns: N/A
3490 */
3491static void SkGmSetRxTxEn(
3492SK_AC *pAC, /* Adapter Context */
3493SK_IOC IoC, /* IO context */
3494int Port, /* Port Index (MAC_1 + n) */
3495int Para) /* Parameter to set: MAC LoopBack, Duplex Mode */
3496{
3497 SK_U16 Ctrl;
3498
3499 GM_IN16(IoC, Port, GM_GP_CTRL, &Ctrl);
3500
3501 switch (Para & (SK_MAC_LOOPB_ON | SK_MAC_LOOPB_OFF)) {
3502 case SK_MAC_LOOPB_ON:
3503 Ctrl |= GM_GPCR_LOOP_ENA;
3504 break;
3505 case SK_MAC_LOOPB_OFF:
3506 Ctrl &= ~GM_GPCR_LOOP_ENA;
3507 break;
3508 }
3509
3510 switch (Para & (SK_PHY_FULLD_ON | SK_PHY_FULLD_OFF)) {
3511 case SK_PHY_FULLD_ON:
3512 Ctrl |= GM_GPCR_DUP_FULL;
3513 break;
3514 case SK_PHY_FULLD_OFF:
3515 Ctrl &= ~GM_GPCR_DUP_FULL;
3516 break;
3517 }
3518
3519 GM_OUT16(IoC, Port, GM_GP_CTRL, (SK_U16)(Ctrl | GM_GPCR_RX_ENA |
3520 GM_GPCR_TX_ENA));
3521
3522 /* dummy read to ensure writing */
3523 GM_IN16(IoC, Port, GM_GP_CTRL, &Ctrl);
3524
3525} /* SkGmSetRxTxEn */
3526#endif /* YUKON */
3527
3528
3529#ifndef SK_SLIM
3530/******************************************************************************
3531 *
3532 * SkMacSetRxTxEn() - Special Set Rx/Tx Enable and parameters
3533 *
3534 * Description: calls the Special Set Rx/Tx Enable routines dep. on board type
3535 *
3536 * Returns: N/A
3537 */
3538void SkMacSetRxTxEn(
3539SK_AC *pAC, /* Adapter Context */
3540SK_IOC IoC, /* IO context */
3541int Port, /* Port Index (MAC_1 + n) */
3542int Para)
3543{
3544#ifdef GENESIS
3545 if (pAC->GIni.GIGenesis) {
3546
3547 SkXmSetRxTxEn(pAC, IoC, Port, Para);
3548 }
3549#endif /* GENESIS */
3550
3551#ifdef YUKON
3552 if (pAC->GIni.GIYukon) {
3553
3554 SkGmSetRxTxEn(pAC, IoC, Port, Para);
3555 }
3556#endif /* YUKON */
3557
3558} /* SkMacSetRxTxEn */
3559#endif /* !SK_SLIM */
3560
3561
3562/****************************************************************************** 3115/******************************************************************************
3563 * 3116 *
3564 * SkMacRxTxEnable() - Enable Rx/Tx activity if port is up 3117 * SkMacRxTxEnable() - Enable Rx/Tx activity if port is up
@@ -3976,7 +3529,7 @@ SK_U16 PhyStat) /* PHY Status word to analyse */
3976 * Returns: 3529 * Returns:
3977 * nothing 3530 * nothing
3978 */ 3531 */
3979void SkXmIrq( 3532static void SkXmIrq(
3980SK_AC *pAC, /* adapter context */ 3533SK_AC *pAC, /* adapter context */
3981SK_IOC IoC, /* IO context */ 3534SK_IOC IoC, /* IO context */
3982int Port) /* Port Index (MAC_1 + n) */ 3535int Port) /* Port Index (MAC_1 + n) */
@@ -4112,7 +3665,7 @@ int Port) /* Port Index (MAC_1 + n) */
4112 * Returns: 3665 * Returns:
4113 * nothing 3666 * nothing
4114 */ 3667 */
4115void SkGmIrq( 3668static void SkGmIrq(
4116SK_AC *pAC, /* adapter context */ 3669SK_AC *pAC, /* adapter context */
4117SK_IOC IoC, /* IO context */ 3670SK_IOC IoC, /* IO context */
4118int Port) /* Port Index (MAC_1 + n) */ 3671int Port) /* Port Index (MAC_1 + n) */
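The removed low-power code changes PHY behaviour by read-modify-writing a control register and then setting the reset bit so the new settings take effect. A sketch of that sequence with placeholder register names and accessors (not the driver's SkGmPhyRead/SkGmPhyWrite):

	#define PHY_CTRL_REG	0x10
	#define CTRL_REG	0x00
	#define BIT_DIS_CLK	0x0010
	#define BIT_RESET	0x8000

	/* assumed MDIO accessors -- placeholders for illustration only */
	extern void phy_read(int port, int reg, unsigned short *val);
	extern void phy_write(int port, int reg, unsigned short val);

	static void enter_power_down(int port)
	{
		unsigned short word;

		phy_read(port, PHY_CTRL_REG, &word);
		word |= BIT_DIS_CLK;			/* e.g. stop the 125 MHz clock */
		phy_write(port, PHY_CTRL_REG, word);

		phy_read(port, CTRL_REG, &word);	/* changes need a soft reset */
		word |= BIT_RESET;
		phy_write(port, CTRL_REG, word);
	}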
diff --git a/drivers/net/skfp/fplustm.c b/drivers/net/skfp/fplustm.c
index a2ed47f1cc70..a4b2b6975d6c 100644
--- a/drivers/net/skfp/fplustm.c
+++ b/drivers/net/skfp/fplustm.c
@@ -89,21 +89,21 @@ static const u_short my_sagp = 0xffff ; /* short group address (n.u.) */
89/* 89/*
90 * useful interrupt bits 90 * useful interrupt bits
91 */ 91 */
92static int mac_imsk1u = FM_STXABRS | FM_STXABRA0 | FM_SXMTABT ; 92static const int mac_imsk1u = FM_STXABRS | FM_STXABRA0 | FM_SXMTABT ;
93static int mac_imsk1l = FM_SQLCKS | FM_SQLCKA0 | FM_SPCEPDS | FM_SPCEPDA0| 93static const int mac_imsk1l = FM_SQLCKS | FM_SQLCKA0 | FM_SPCEPDS | FM_SPCEPDA0|
94 FM_STBURS | FM_STBURA0 ; 94 FM_STBURS | FM_STBURA0 ;
95 95
96 /* delete FM_SRBFL after tests */ 96 /* delete FM_SRBFL after tests */
97static int mac_imsk2u = FM_SERRSF | FM_SNFSLD | FM_SRCVOVR | FM_SRBFL | 97static const int mac_imsk2u = FM_SERRSF | FM_SNFSLD | FM_SRCVOVR | FM_SRBFL |
98 FM_SMYCLM ; 98 FM_SMYCLM ;
99static int mac_imsk2l = FM_STRTEXR | FM_SDUPCLM | FM_SFRMCTR | 99static const int mac_imsk2l = FM_STRTEXR | FM_SDUPCLM | FM_SFRMCTR |
100 FM_SERRCTR | FM_SLSTCTR | 100 FM_SERRCTR | FM_SLSTCTR |
101 FM_STRTEXP | FM_SMULTDA | FM_SRNGOP ; 101 FM_STRTEXP | FM_SMULTDA | FM_SRNGOP ;
102 102
103static int mac_imsk3u = FM_SRCVOVR2 | FM_SRBFL2 ; 103static const int mac_imsk3u = FM_SRCVOVR2 | FM_SRBFL2 ;
104static int mac_imsk3l = FM_SRPERRQ2 | FM_SRPERRQ1 ; 104static const int mac_imsk3l = FM_SRPERRQ2 | FM_SRPERRQ1 ;
105 105
106static int mac_beacon_imsk2u = FM_SOTRBEC | FM_SMYBEC | FM_SBEC | 106static const int mac_beacon_imsk2u = FM_SOTRBEC | FM_SMYBEC | FM_SBEC |
107 FM_SLOCLM | FM_SHICLM | FM_SMYCLM | FM_SCLM ; 107 FM_SLOCLM | FM_SHICLM | FM_SMYCLM | FM_SCLM ;
108 108
109 109
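The hunk above only adds const to module-level interrupt mask words. A tiny illustration of what that buys (values are made up): the data moves to read-only storage and accidental writes become compile errors.

	static const int irq_mask = 0x0001 | 0x0004;	/* lives in .rodata now */

	int masked(int status)
	{
		/* irq_mask = 0;  <- would no longer compile: assignment to const */
		return status & irq_mask;
	}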
diff --git a/drivers/net/skfp/pcmplc.c b/drivers/net/skfp/pcmplc.c
index cd0aa4c151b0..74e129f3ce92 100644
--- a/drivers/net/skfp/pcmplc.c
+++ b/drivers/net/skfp/pcmplc.c
@@ -186,7 +186,7 @@ static const struct plt {
186 * Do we need the EBUF error during signaling, too, to detect SUPERNET_3 186 * Do we need the EBUF error during signaling, too, to detect SUPERNET_3
187 * PLL bug? 187 * PLL bug?
188 */ 188 */
189static int plc_imsk_na = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK | 189static const int plc_imsk_na = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK |
190 PL_PCM_ENABLED | PL_SELF_TEST | PL_EBUF_ERR; 190 PL_PCM_ENABLED | PL_SELF_TEST | PL_EBUF_ERR;
191#else /* SUPERNET_3 */ 191#else /* SUPERNET_3 */
192/* 192/*
@@ -195,7 +195,7 @@ static int plc_imsk_na = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK |
195static int plc_imsk_na = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK | 195static int plc_imsk_na = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK |
196 PL_PCM_ENABLED | PL_SELF_TEST ; 196 PL_PCM_ENABLED | PL_SELF_TEST ;
197#endif /* SUPERNET_3 */ 197#endif /* SUPERNET_3 */
198static int plc_imsk_act = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK | 198static const int plc_imsk_act = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK |
199 PL_PCM_ENABLED | PL_SELF_TEST | PL_EBUF_ERR; 199 PL_PCM_ENABLED | PL_SELF_TEST | PL_EBUF_ERR;
200 200
201/* external functions */ 201/* external functions */
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index 4b5ed2c63177..c7fb6133047e 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -67,7 +67,7 @@
67/* each new release!!! */ 67/* each new release!!! */
68#define VERSION "2.07" 68#define VERSION "2.07"
69 69
70static const char *boot_msg = 70static const char * const boot_msg =
71 "SysKonnect FDDI PCI Adapter driver v" VERSION " for\n" 71 "SysKonnect FDDI PCI Adapter driver v" VERSION " for\n"
72 " SK-55xx/SK-58xx adapters (SK-NET FDDI-FP/UP/LP)"; 72 " SK-55xx/SK-58xx adapters (SK-NET FDDI-FP/UP/LP)";
73 73
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index d167deda9a53..35b18057fbdd 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -201,7 +201,7 @@ static int max_interrupt_work = 20;
201static int mtu; 201static int mtu;
202/* Maximum number of multicast addresses to filter (vs. rx-all-multicast). 202/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
203 The Starfire has a 512 element hash table based on the Ethernet CRC. */ 203 The Starfire has a 512 element hash table based on the Ethernet CRC. */
204static int multicast_filter_limit = 512; 204static const int multicast_filter_limit = 512;
205/* Whether to do TCP/UDP checksums in hardware */ 205/* Whether to do TCP/UDP checksums in hardware */
206static int enable_hw_cksum = 1; 206static int enable_hw_cksum = 1;
207 207
@@ -463,7 +463,7 @@ static struct pci_device_id starfire_pci_tbl[] = {
463MODULE_DEVICE_TABLE(pci, starfire_pci_tbl); 463MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);
464 464
465/* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */ 465/* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */
466static struct chip_info { 466static const struct chip_info {
467 const char *name; 467 const char *name;
468 int drv_flags; 468 int drv_flags;
469} netdrv_tbl[] __devinitdata = { 469} netdrv_tbl[] __devinitdata = {
@@ -2084,6 +2084,38 @@ static int netdev_close(struct net_device *dev)
2084 return 0; 2084 return 0;
2085} 2085}
2086 2086
2087#ifdef CONFIG_PM
2088static int starfire_suspend(struct pci_dev *pdev, pm_message_t state)
2089{
2090 struct net_device *dev = pci_get_drvdata(pdev);
2091
2092 if (netif_running(dev)) {
2093 netif_device_detach(dev);
2094 netdev_close(dev);
2095 }
2096
2097 pci_save_state(pdev);
2098 pci_set_power_state(pdev, pci_choose_state(pdev,state));
2099
2100 return 0;
2101}
2102
2103static int starfire_resume(struct pci_dev *pdev)
2104{
2105 struct net_device *dev = pci_get_drvdata(pdev);
2106
2107 pci_set_power_state(pdev, PCI_D0);
2108 pci_restore_state(pdev);
2109
2110 if (netif_running(dev)) {
2111 netdev_open(dev);
2112 netif_device_attach(dev);
2113 }
2114
2115 return 0;
2116}
2117#endif /* CONFIG_PM */
2118
2087 2119
2088static void __devexit starfire_remove_one (struct pci_dev *pdev) 2120static void __devexit starfire_remove_one (struct pci_dev *pdev)
2089{ 2121{
@@ -2115,6 +2147,10 @@ static struct pci_driver starfire_driver = {
2115 .name = DRV_NAME, 2147 .name = DRV_NAME,
2116 .probe = starfire_init_one, 2148 .probe = starfire_init_one,
2117 .remove = __devexit_p(starfire_remove_one), 2149 .remove = __devexit_p(starfire_remove_one),
2150#ifdef CONFIG_PM
2151 .suspend = starfire_suspend,
2152 .resume = starfire_resume,
2153#endif /* CONFIG_PM */
2118 .id_table = starfire_pci_tbl, 2154 .id_table = starfire_pci_tbl,
2119}; 2155};
2120 2156
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 0ab9c38b4a34..61eec46cb111 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -106,7 +106,7 @@
106static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */ 106static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
107/* Maximum number of multicast addresses to filter (vs. rx-all-multicast). 107/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
108 Typical is a 64 element hash table based on the Ethernet CRC. */ 108 Typical is a 64 element hash table based on the Ethernet CRC. */
109static int multicast_filter_limit = 32; 109static const int multicast_filter_limit = 32;
110 110
111/* Set the copy breakpoint for the copy-only-tiny-frames scheme. 111/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
112 Setting to > 1518 effectively disables this feature. 112 Setting to > 1518 effectively disables this feature.
@@ -298,7 +298,7 @@ enum {
298struct pci_id_info { 298struct pci_id_info {
299 const char *name; 299 const char *name;
300}; 300};
301static struct pci_id_info pci_id_tbl[] = { 301static const struct pci_id_info pci_id_tbl[] = {
302 {"D-Link DFE-550TX FAST Ethernet Adapter"}, 302 {"D-Link DFE-550TX FAST Ethernet Adapter"},
303 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"}, 303 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
304 {"D-Link DFE-580TX 4 port Server Adapter"}, 304 {"D-Link DFE-580TX 4 port Server Adapter"},
@@ -633,9 +633,13 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
633 633
634 np->phys[0] = 1; /* Default setting */ 634 np->phys[0] = 1; /* Default setting */
635 np->mii_preamble_required++; 635 np->mii_preamble_required++;
636 /*
637 * It seems some phys doesn't deal well with address 0 being accessed
638 * first, so leave address zero to the end of the loop (32 & 31).
639 */
636 for (phy = 1; phy <= 32 && phy_idx < MII_CNT; phy++) { 640 for (phy = 1; phy <= 32 && phy_idx < MII_CNT; phy++) {
637 int mii_status = mdio_read(dev, phy, MII_BMSR);
638 int phyx = phy & 0x1f; 641 int phyx = phy & 0x1f;
642 int mii_status = mdio_read(dev, phyx, MII_BMSR);
639 if (mii_status != 0xffff && mii_status != 0x0000) { 643 if (mii_status != 0xffff && mii_status != 0x0000) {
640 np->phys[phy_idx++] = phyx; 644 np->phys[phy_idx++] = phyx;
641 np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE); 645 np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
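
The sundance probe hunk above extends the PHY scan to 32 iterations and reads the BMSR from phy & 0x1f instead of phy, so MII address 0 is visited last; per the new comment, some PHYs do not cope well with address 0 being accessed first. The tiny stand-alone sketch below is not sundance code, it just prints the addresses the loop would visit, in order.

#include <stdio.h>

/* Print the MII addresses in the order the probe loop above visits them:
 * 1..31 first, then 0, because 32 & 0x1f wraps back to address 0. */
int main(void)
{
        int phy;

        for (phy = 1; phy <= 32; phy++) {
                int phyx = phy & 0x1f;          /* 32 -> 0 */
                printf("%d%c", phyx, phy < 32 ? ' ' : '\n');
        }
        return 0;
}
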
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c
index d3ddb41d6e5c..cb0aba95d4e3 100644
--- a/drivers/net/sungem_phy.c
+++ b/drivers/net/sungem_phy.c
@@ -39,7 +39,7 @@
39#include "sungem_phy.h" 39#include "sungem_phy.h"
40 40
41/* Link modes of the BCM5400 PHY */ 41/* Link modes of the BCM5400 PHY */
42static int phy_BCM5400_link_table[8][3] = { 42static const int phy_BCM5400_link_table[8][3] = {
43 { 0, 0, 0 }, /* No link */ 43 { 0, 0, 0 }, /* No link */
44 { 0, 0, 0 }, /* 10BT Half Duplex */ 44 { 0, 0, 0 }, /* 10BT Half Duplex */
45 { 1, 0, 0 }, /* 10BT Full Duplex */ 45 { 1, 0, 0 }, /* 10BT Full Duplex */
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index caf4102b54ce..6c6c5498899f 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -7802,7 +7802,7 @@ static int tg3_test_link(struct tg3 *tp)
7802} 7802}
7803 7803
7804/* Only test the commonly used registers */ 7804/* Only test the commonly used registers */
7805static int tg3_test_registers(struct tg3 *tp) 7805static const int tg3_test_registers(struct tg3 *tp)
7806{ 7806{
7807 int i, is_5705; 7807 int i, is_5705;
7808 u32 offset, read_mask, write_mask, val, save_val, read_val; 7808 u32 offset, read_mask, write_mask, val, save_val, read_val;
@@ -8016,7 +8016,7 @@ out:
8016 8016
8017static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len) 8017static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8018{ 8018{
8019 static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a }; 8019 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8020 int i; 8020 int i;
8021 u32 j; 8021 u32 j;
8022 8022
@@ -9097,6 +9097,10 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9097 tp->phy_id = PHY_ID_INVALID; 9097 tp->phy_id = PHY_ID_INVALID;
9098 tp->led_ctrl = LED_CTRL_MODE_PHY_1; 9098 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9099 9099
9100 /* Do not even try poking around in here on Sun parts. */
9101 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
9102 return;
9103
9100 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); 9104 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9101 if (val == NIC_SRAM_DATA_SIG_MAGIC) { 9105 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9102 u32 nic_cfg, led_cfg; 9106 u32 nic_cfg, led_cfg;
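
tg3_do_mem_test() in the hunk above walks a region of NIC memory writing each word of a fixed pattern table and reading it back; the change only marks that table const. The fragment below is a userspace analogue of the same write-then-verify technique, reusing the pattern values from the hunk; it is an illustration, not the tg3 implementation.

#include <stdint.h>
#include <stdio.h>

/* Fill a buffer of 32-bit words with each test pattern in turn and verify
 * that every word reads back intact; returns 0 on success, -1 on the first
 * mismatch. */
static int mem_test(volatile uint32_t *buf, size_t words)
{
        static const uint32_t test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
        size_t i, j;

        for (i = 0; i < sizeof(test_pattern) / sizeof(test_pattern[0]); i++) {
                for (j = 0; j < words; j++)
                        buf[j] = test_pattern[i];
                for (j = 0; j < words; j++)
                        if (buf[j] != test_pattern[i])
                                return -1;
        }
        return 0;
}

int main(void)
{
        uint32_t buf[64];

        printf("mem_test: %s\n", mem_test(buf, 64) == 0 ? "ok" : "failed");
        return 0;
}
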
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index 97712c3c4e07..c58a4c31d0dd 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -122,6 +122,7 @@
122#include <linux/spinlock.h> 122#include <linux/spinlock.h>
123#include <linux/version.h> 123#include <linux/version.h>
124#include <linux/bitops.h> 124#include <linux/bitops.h>
125#include <linux/jiffies.h>
125 126
126#include <net/checksum.h> 127#include <net/checksum.h>
127 128
@@ -512,7 +513,7 @@ static int streamer_reset(struct net_device *dev)
512 513
513 while (!((readw(streamer_mmio + SISR)) & SISR_SRB_REPLY)) { 514 while (!((readw(streamer_mmio + SISR)) & SISR_SRB_REPLY)) {
514 msleep_interruptible(100); 515 msleep_interruptible(100);
515 if (jiffies - t > 40 * HZ) { 516 if (time_after(jiffies, t + 40 * HZ)) {
516 printk(KERN_ERR 517 printk(KERN_ERR
517 "IBM PCI tokenring card not responding\n"); 518 "IBM PCI tokenring card not responding\n");
518 release_region(dev->base_addr, STREAMER_IO_SPACE); 519 release_region(dev->base_addr, STREAMER_IO_SPACE);
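
The hunk above (and the similar olympic.c conversions that follow) replaces open-coded jiffies arithmetic with time_after() from <linux/jiffies.h>, which documents the intent of the timeout check and stays correct when the jiffies counter wraps. Below is a self-contained illustration of the failure mode it guards against; the macro only mirrors the kernel's approach and is not the <linux/jiffies.h> source.

#include <stdio.h>

/* Wrap-safe "a is after b": the signed difference stays meaningful as long
 * as the two timestamps are less than half the counter range apart. */
#define time_after(a, b)        ((long)((b) - (a)) < 0)

int main(void)
{
        unsigned long t = (unsigned long)-10;   /* timestamp shortly before the counter wraps */
        unsigned long deadline = t + 40;        /* wraps around to a small value */
        unsigned long now = t + 5;              /* only 5 ticks have actually elapsed */

        /* A direct comparison is fooled by the wrapped deadline... */
        printf("now > deadline           : %d  (wrongly reports the timeout as expired)\n",
               (int)(now > deadline));
        /* ...while the signed-difference form is not. */
        printf("time_after(now, deadline): %d  (correctly reports it as not expired)\n",
               time_after(now, deadline));
        return 0;
}
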
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index 05477d24fd49..23032a7bc0a9 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -100,6 +100,7 @@
100#include <linux/pci.h> 100#include <linux/pci.h>
101#include <linux/spinlock.h> 101#include <linux/spinlock.h>
102#include <linux/bitops.h> 102#include <linux/bitops.h>
103#include <linux/jiffies.h>
103 104
104#include <net/checksum.h> 105#include <net/checksum.h>
105 106
@@ -307,7 +308,7 @@ static int __devinit olympic_init(struct net_device *dev)
307 t=jiffies; 308 t=jiffies;
308 while((readl(olympic_mmio+BCTL)) & BCTL_SOFTRESET) { 309 while((readl(olympic_mmio+BCTL)) & BCTL_SOFTRESET) {
309 schedule(); 310 schedule();
310 if(jiffies-t > 40*HZ) { 311 if(time_after(jiffies, t + 40*HZ)) {
311 printk(KERN_ERR "IBM PCI tokenring card not responding.\n"); 312 printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
312 return -ENODEV; 313 return -ENODEV;
313 } 314 }
@@ -359,7 +360,7 @@ static int __devinit olympic_init(struct net_device *dev)
359 t=jiffies; 360 t=jiffies;
360 while (!readl(olympic_mmio+CLKCTL) & CLKCTL_PAUSE) { 361 while (!readl(olympic_mmio+CLKCTL) & CLKCTL_PAUSE) {
361 schedule() ; 362 schedule() ;
362 if(jiffies-t > 2*HZ) { 363 if(time_after(jiffies, t + 2*HZ)) {
363 printk(KERN_ERR "IBM Cardbus tokenring adapter not responsing.\n") ; 364 printk(KERN_ERR "IBM Cardbus tokenring adapter not responsing.\n") ;
364 return -ENODEV; 365 return -ENODEV;
365 } 366 }
@@ -373,7 +374,7 @@ static int __devinit olympic_init(struct net_device *dev)
373 t=jiffies; 374 t=jiffies;
374 while(!((readl(olympic_mmio+SISR_RR)) & SISR_SRB_REPLY)) { 375 while(!((readl(olympic_mmio+SISR_RR)) & SISR_SRB_REPLY)) {
375 schedule(); 376 schedule();
376 if(jiffies-t > 15*HZ) { 377 if(time_after(jiffies, t + 15*HZ)) {
377 printk(KERN_ERR "IBM PCI tokenring card not responding.\n"); 378 printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
378 return -ENODEV; 379 return -ENODEV;
379 } 380 }
@@ -519,7 +520,7 @@ static int olympic_open(struct net_device *dev)
519 olympic_priv->srb_queued=0; 520 olympic_priv->srb_queued=0;
520 break; 521 break;
521 } 522 }
522 if ((jiffies-t) > 10*HZ) { 523 if (time_after(jiffies, t + 10*HZ)) {
523 printk(KERN_WARNING "%s: SRB timed out. \n",dev->name) ; 524 printk(KERN_WARNING "%s: SRB timed out. \n",dev->name) ;
524 olympic_priv->srb_queued=0; 525 olympic_priv->srb_queued=0;
525 break ; 526 break ;
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 2d0cfbceee22..6299e186c73f 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -402,8 +402,7 @@ static void de_rx (struct de_private *de)
402 unsigned copying_skb, buflen; 402 unsigned copying_skb, buflen;
403 403
404 skb = de->rx_skb[rx_tail].skb; 404 skb = de->rx_skb[rx_tail].skb;
405 if (!skb) 405 BUG_ON(!skb);
406 BUG();
407 rmb(); 406 rmb();
408 status = le32_to_cpu(de->rx_ring[rx_tail].opts1); 407 status = le32_to_cpu(de->rx_ring[rx_tail].opts1);
409 if (status & DescOwn) 408 if (status & DescOwn)
@@ -545,8 +544,7 @@ static void de_tx (struct de_private *de)
545 break; 544 break;
546 545
547 skb = de->tx_skb[tx_tail].skb; 546 skb = de->tx_skb[tx_tail].skb;
548 if (!skb) 547 BUG_ON(!skb);
549 BUG();
550 if (unlikely(skb == DE_DUMMY_SKB)) 548 if (unlikely(skb == DE_DUMMY_SKB))
551 goto next; 549 goto next;
552 550
@@ -789,8 +787,7 @@ static void __de_set_rx_mode (struct net_device *dev)
789 787
790 de->tx_head = NEXT_TX(entry); 788 de->tx_head = NEXT_TX(entry);
791 789
792 if (TX_BUFFS_AVAIL(de) < 0) 790 BUG_ON(TX_BUFFS_AVAIL(de) < 0);
793 BUG();
794 if (TX_BUFFS_AVAIL(de) == 0) 791 if (TX_BUFFS_AVAIL(de) == 0)
795 netif_stop_queue(dev); 792 netif_stop_queue(dev);
796 793
@@ -916,8 +913,7 @@ static void de_set_media (struct de_private *de)
916 unsigned media = de->media_type; 913 unsigned media = de->media_type;
917 u32 macmode = dr32(MacMode); 914 u32 macmode = dr32(MacMode);
918 915
919 if (de_is_running(de)) 916 BUG_ON(de_is_running(de));
920 BUG();
921 917
922 if (de->de21040) 918 if (de->de21040)
923 dw32(CSR11, FULL_DUPLEX_MAGIC); 919 dw32(CSR11, FULL_DUPLEX_MAGIC);
@@ -1153,8 +1149,7 @@ static void de_media_interrupt (struct de_private *de, u32 status)
1153 return; 1149 return;
1154 } 1150 }
1155 1151
1156 if (!(status & LinkFail)) 1152 BUG_ON(!(status & LinkFail));
1157 BUG();
1158 1153
1159 if (netif_carrier_ok(de->dev)) { 1154 if (netif_carrier_ok(de->dev)) {
1160 de_link_down(de); 1155 de_link_down(de);
@@ -2092,8 +2087,7 @@ static void __exit de_remove_one (struct pci_dev *pdev)
2092 struct net_device *dev = pci_get_drvdata(pdev); 2087 struct net_device *dev = pci_get_drvdata(pdev);
2093 struct de_private *de = dev->priv; 2088 struct de_private *de = dev->priv;
2094 2089
2095 if (!dev) 2090 BUG_ON(!dev);
2096 BUG();
2097 unregister_netdev(dev); 2091 unregister_netdev(dev);
2098 kfree(de->ee_data); 2092 kfree(de->ee_data);
2099 iounmap(de->regs); 2093 iounmap(de->regs);
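
The de2104x changes above fold two-line "if (cond) BUG();" checks into single BUG_ON(cond) assertions; behaviour is unchanged, the invariant is simply stated in one line. A trivial userspace analogue follows, with a hypothetical macro rather than the kernel's <asm/bug.h> definition.

#include <stdio.h>
#include <stdlib.h>

/* Userspace stand-in for the kernel's BUG_ON(): abort loudly when the
 * stated invariant is violated. */
#define BUG_ON(cond)                                            \
        do {                                                    \
                if (cond) {                                     \
                        fprintf(stderr, "BUG at %s:%d\n",       \
                                __FILE__, __LINE__);            \
                        abort();                                \
                }                                               \
        } while (0)

int main(void)
{
        void *skb = &skb;       /* pretend we hold a valid buffer pointer */

        BUG_ON(!skb);           /* reads as "this must never be NULL" */
        puts("invariant held");
        return 0;
}
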
diff --git a/drivers/net/tulip/pnic.c b/drivers/net/tulip/pnic.c
index d9980bde7508..ca7e53246adb 100644
--- a/drivers/net/tulip/pnic.c
+++ b/drivers/net/tulip/pnic.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/pci.h> 18#include <linux/pci.h>
19#include <linux/jiffies.h>
19#include "tulip.h" 20#include "tulip.h"
20 21
21 22
@@ -68,7 +69,7 @@ void pnic_lnk_change(struct net_device *dev, int csr5)
68 */ 69 */
69 if (tulip_media_cap[dev->if_port] & MediaIsMII) 70 if (tulip_media_cap[dev->if_port] & MediaIsMII)
70 return; 71 return;
71 if (! tp->nwayset || jiffies - dev->trans_start > 1*HZ) { 72 if (! tp->nwayset || time_after(jiffies, dev->trans_start + 1*HZ)) {
72 tp->csr6 = 0x00420000 | (tp->csr6 & 0x0000fdff); 73 tp->csr6 = 0x00420000 | (tp->csr6 & 0x0000fdff);
73 iowrite32(tp->csr6, ioaddr + CSR6); 74 iowrite32(tp->csr6, ioaddr + CSR6);
74 iowrite32(0x30, ioaddr + CSR12); 75 iowrite32(0x30, ioaddr + CSR12);
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 5b1af3986abf..ba05dedf29d3 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -1645,7 +1645,7 @@ static int w840_suspend (struct pci_dev *pdev, pm_message_t state)
1645 1645
1646 /* no more hardware accesses behind this line. */ 1646 /* no more hardware accesses behind this line. */
1647 1647
1648 if (np->csr6) BUG(); 1648 BUG_ON(np->csr6);
1649 if (ioread32(ioaddr + IntrEnable)) BUG(); 1649 if (ioread32(ioaddr + IntrEnable)) BUG();
1650 1650
1651 /* pci_power_off(pdev, -1); */ 1651 /* pci_power_off(pdev, -1); */
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 60d1e05ab732..56344103ac23 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -32,6 +32,9 @@
32 32
33#include <asm/uaccess.h> 33#include <asm/uaccess.h>
34#include <asm/io.h> 34#include <asm/io.h>
35#ifdef CONFIG_NET_POLL_CONTROLLER
36#include <asm/irq.h>
37#endif
35 38
36#ifdef DEBUG 39#ifdef DEBUG
37#define enter(x) printk("Enter: %s, %s line %i\n",x,__FILE__,__LINE__) 40#define enter(x) printk("Enter: %s, %s line %i\n",x,__FILE__,__LINE__)
@@ -598,10 +601,8 @@ static void setup_descriptors(struct xircom_private *card)
598 enter("setup_descriptors"); 601 enter("setup_descriptors");
599 602
600 603
601 if (card->rx_buffer == NULL) 604 BUG_ON(card->rx_buffer == NULL);
602 BUG(); 605 BUG_ON(card->tx_buffer == NULL);
603 if (card->tx_buffer == NULL)
604 BUG();
605 606
606 /* Receive descriptors */ 607 /* Receive descriptors */
607 memset(card->rx_buffer, 0, 128); /* clear the descriptors */ 608 memset(card->rx_buffer, 0, 128); /* clear the descriptors */
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 4c76cb794bfb..cde35dd87906 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -178,7 +178,7 @@ enum typhoon_cards {
178}; 178};
179 179
180/* directly indexed by enum typhoon_cards, above */ 180/* directly indexed by enum typhoon_cards, above */
181static struct typhoon_card_info typhoon_card_info[] __devinitdata = { 181static const struct typhoon_card_info typhoon_card_info[] __devinitdata = {
182 { "3Com Typhoon (3C990-TX)", 182 { "3Com Typhoon (3C990-TX)",
183 TYPHOON_CRYPTO_NONE}, 183 TYPHOON_CRYPTO_NONE},
184 { "3Com Typhoon (3CR990-TX-95)", 184 { "3Com Typhoon (3CR990-TX-95)",
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 18c27e1e7884..883cf7da10fc 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -459,7 +459,7 @@ config WANPIPE_FR
459 bool "WANPIPE Frame Relay support" 459 bool "WANPIPE Frame Relay support"
460 depends on VENDOR_SANGOMA 460 depends on VENDOR_SANGOMA
461 help 461 help
462 Connect a WANPIPE card to a Frame Relay network, or use Frame Felay 462 Connect a WANPIPE card to a Frame Relay network, or use Frame Relay
463 API to develop custom applications. 463 API to develop custom applications.
464 464
465 Contains the Ethernet Bridging over Frame Relay feature, where 465 Contains the Ethernet Bridging over Frame Relay feature, where
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
index 7db1d1d0bb34..cf5c805452a3 100644
--- a/drivers/net/wan/hostess_sv11.c
+++ b/drivers/net/wan/hostess_sv11.c
@@ -29,6 +29,7 @@
29#include <linux/ioport.h> 29#include <linux/ioport.h>
30#include <net/arp.h> 30#include <net/arp.h>
31 31
32#include <asm/irq.h>
32#include <asm/io.h> 33#include <asm/io.h>
33#include <asm/dma.h> 34#include <asm/dma.h>
34#include <asm/byteorder.h> 35#include <asm/byteorder.h>
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index 5380ddfcd7d5..050e854e7774 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -23,6 +23,7 @@
23#include <linux/init.h> 23#include <linux/init.h>
24#include <net/arp.h> 24#include <net/arp.h>
25 25
26#include <asm/irq.h>
26#include <asm/io.h> 27#include <asm/io.h>
27#include <asm/dma.h> 28#include <asm/dma.h>
28#include <asm/byteorder.h> 29#include <asm/byteorder.h>
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index ef85d76575a2..5b0a19a5058d 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -6,7 +6,8 @@ menu "Wireless LAN (non-hamradio)"
6 depends on NETDEVICES 6 depends on NETDEVICES
7 7
8config NET_RADIO 8config NET_RADIO
9 bool "Wireless LAN drivers (non-hamradio) & Wireless Extensions" 9 bool "Wireless LAN drivers (non-hamradio)"
10 select WIRELESS_EXT
10 ---help--- 11 ---help---
11 Support for wireless LANs and everything having to do with radio, 12 Support for wireless LANs and everything having to do with radio,
12 but not with amateur radio or FM broadcasting. 13 but not with amateur radio or FM broadcasting.
@@ -135,8 +136,9 @@ comment "Wireless 802.11b ISA/PCI cards support"
135 136
136config IPW2100 137config IPW2100
137 tristate "Intel PRO/Wireless 2100 Network Connection" 138 tristate "Intel PRO/Wireless 2100 Network Connection"
138 depends on NET_RADIO && PCI && IEEE80211 139 depends on NET_RADIO && PCI
139 select FW_LOADER 140 select FW_LOADER
141 select IEEE80211
140 ---help--- 142 ---help---
141 A driver for the Intel PRO/Wireless 2100 Network 143 A driver for the Intel PRO/Wireless 2100 Network
142 Connection 802.11b wireless network adapter. 144 Connection 802.11b wireless network adapter.
@@ -188,8 +190,9 @@ config IPW2100_DEBUG
188 190
189config IPW2200 191config IPW2200
190 tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection" 192 tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection"
191 depends on NET_RADIO && IEEE80211 && PCI 193 depends on NET_RADIO && PCI
192 select FW_LOADER 194 select FW_LOADER
195 select IEEE80211
193 ---help--- 196 ---help---
194 A driver for the Intel PRO/Wireless 2200BG and 2915ABG Network 197 A driver for the Intel PRO/Wireless 2200BG and 2915ABG Network
195 Connection adapters. 198 Connection adapters.
@@ -201,7 +204,7 @@ config IPW2200
201 In order to use this driver, you will need a firmware image for it. 204 In order to use this driver, you will need a firmware image for it.
202 You can obtain the firmware from 205 You can obtain the firmware from
203 <http://ipw2200.sf.net/>. See the above referenced README.ipw2200 206 <http://ipw2200.sf.net/>. See the above referenced README.ipw2200
204 for information on where to install the firmare images. 207 for information on where to install the firmware images.
205 208
206 You will also very likely need the Wireless Tools in order to 209 You will also very likely need the Wireless Tools in order to
207 configure your card: 210 configure your card:
@@ -213,6 +216,19 @@ config IPW2200
213 say M here and read <file:Documentation/modules.txt>. The module 216 say M here and read <file:Documentation/modules.txt>. The module
214 will be called ipw2200.ko. 217 will be called ipw2200.ko.
215 218
219config IPW2200_MONITOR
220 bool "Enable promiscuous mode"
221 depends on IPW2200
222 ---help---
223 Enables promiscuous/monitor mode support for the ipw2200 driver.
224 With this feature compiled into the driver, you can switch to
225 promiscuous mode via the Wireless Tool's Monitor mode. While in this
226 mode, no packets can be sent.
227
228config IPW_QOS
229 bool "Enable QoS support"
230 depends on IPW2200 && EXPERIMENTAL
231
216config IPW2200_DEBUG 232config IPW2200_DEBUG
217 bool "Enable full debugging output in IPW2200 module." 233 bool "Enable full debugging output in IPW2200 module."
218 depends on IPW2200 234 depends on IPW2200
@@ -239,13 +255,14 @@ config IPW2200_DEBUG
239 255
240config AIRO 256config AIRO
241 tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards" 257 tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards"
242 depends on NET_RADIO && ISA_DMA_API && CRYPTO && (PCI || BROKEN) 258 depends on NET_RADIO && ISA_DMA_API && (PCI || BROKEN)
259 select CRYPTO
243 ---help--- 260 ---help---
244 This is the standard Linux driver to support Cisco/Aironet ISA and 261 This is the standard Linux driver to support Cisco/Aironet ISA and
245 PCI 802.11 wireless cards. 262 PCI 802.11 wireless cards.
246 It supports the new 802.11b cards from Cisco (Cisco 34X, Cisco 35X 263 It supports the new 802.11b cards from Cisco (Cisco 34X, Cisco 35X
247 - with or without encryption) as well as card before the Cisco 264 - with or without encryption) as well as card before the Cisco
248 aquisition (Aironet 4500, Aironet 4800, Aironet 4800B). 265 acquisition (Aironet 4500, Aironet 4800, Aironet 4800B).
249 266
250 This driver support both the standard Linux Wireless Extensions 267 This driver support both the standard Linux Wireless Extensions
251 and Cisco proprietary API, so both the Linux Wireless Tools and the 268 and Cisco proprietary API, so both the Linux Wireless Tools and the
@@ -387,13 +404,14 @@ config PCMCIA_SPECTRUM
387config AIRO_CS 404config AIRO_CS
388 tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards" 405 tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards"
389 depends on NET_RADIO && PCMCIA && (BROKEN || !M32R) 406 depends on NET_RADIO && PCMCIA && (BROKEN || !M32R)
407 select CRYPTO
390 ---help--- 408 ---help---
391 This is the standard Linux driver to support Cisco/Aironet PCMCIA 409 This is the standard Linux driver to support Cisco/Aironet PCMCIA
392 802.11 wireless cards. This driver is the same as the Aironet 410 802.11 wireless cards. This driver is the same as the Aironet
393 driver part of the Linux Pcmcia package. 411 driver part of the Linux Pcmcia package.
394 It supports the new 802.11b cards from Cisco (Cisco 34X, Cisco 35X 412 It supports the new 802.11b cards from Cisco (Cisco 34X, Cisco 35X
395 - with or without encryption) as well as card before the Cisco 413 - with or without encryption) as well as card before the Cisco
396 aquisition (Aironet 4500, Aironet 4800, Aironet 4800B). It also 414 acquisition (Aironet 4500, Aironet 4800, Aironet 4800B). It also
397 supports OEM of Cisco such as the DELL TrueMobile 4800 and Xircom 415 supports OEM of Cisco such as the DELL TrueMobile 4800 and Xircom
398 802.11b cards. 416 802.11b cards.
399 417
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index a4c7ae94614d..864937a409e5 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -36,6 +36,7 @@
36#include <linux/in.h> 36#include <linux/in.h>
37#include <linux/bitops.h> 37#include <linux/bitops.h>
38#include <linux/scatterlist.h> 38#include <linux/scatterlist.h>
39#include <linux/crypto.h>
39#include <asm/io.h> 40#include <asm/io.h>
40#include <asm/system.h> 41#include <asm/system.h>
41 42
@@ -87,14 +88,6 @@ static struct pci_driver airo_driver = {
87#include <linux/delay.h> 88#include <linux/delay.h>
88#endif 89#endif
89 90
90/* Support Cisco MIC feature */
91#define MICSUPPORT
92
93#if defined(MICSUPPORT) && !defined(CONFIG_CRYPTO)
94#warning MIC support requires Crypto API
95#undef MICSUPPORT
96#endif
97
98/* Hack to do some power saving */ 91/* Hack to do some power saving */
99#define POWER_ON_DOWN 92#define POWER_ON_DOWN
100 93
@@ -1118,7 +1111,6 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp);
1118static int writerids(struct net_device *dev, aironet_ioctl *comp); 1111static int writerids(struct net_device *dev, aironet_ioctl *comp);
1119static int flashcard(struct net_device *dev, aironet_ioctl *comp); 1112static int flashcard(struct net_device *dev, aironet_ioctl *comp);
1120#endif /* CISCO_EXT */ 1113#endif /* CISCO_EXT */
1121#ifdef MICSUPPORT
1122static void micinit(struct airo_info *ai); 1114static void micinit(struct airo_info *ai);
1123static int micsetup(struct airo_info *ai); 1115static int micsetup(struct airo_info *ai);
1124static int encapsulate(struct airo_info *ai, etherHead *pPacket, MICBuffer *buffer, int len); 1116static int encapsulate(struct airo_info *ai, etherHead *pPacket, MICBuffer *buffer, int len);
@@ -1127,9 +1119,6 @@ static int decapsulate(struct airo_info *ai, MICBuffer *mic, etherHead *pPacket,
1127static u8 airo_rssi_to_dbm (tdsRssiEntry *rssi_rid, u8 rssi); 1119static u8 airo_rssi_to_dbm (tdsRssiEntry *rssi_rid, u8 rssi);
1128static u8 airo_dbm_to_pct (tdsRssiEntry *rssi_rid, u8 dbm); 1120static u8 airo_dbm_to_pct (tdsRssiEntry *rssi_rid, u8 dbm);
1129 1121
1130#include <linux/crypto.h>
1131#endif
1132
1133struct airo_info { 1122struct airo_info {
1134 struct net_device_stats stats; 1123 struct net_device_stats stats;
1135 struct net_device *dev; 1124 struct net_device *dev;
@@ -1190,12 +1179,10 @@ struct airo_info {
1190 unsigned long scan_timestamp; /* Time started to scan */ 1179 unsigned long scan_timestamp; /* Time started to scan */
1191 struct iw_spy_data spy_data; 1180 struct iw_spy_data spy_data;
1192 struct iw_public_data wireless_data; 1181 struct iw_public_data wireless_data;
1193#ifdef MICSUPPORT
1194 /* MIC stuff */ 1182 /* MIC stuff */
1195 struct crypto_tfm *tfm; 1183 struct crypto_tfm *tfm;
1196 mic_module mod[2]; 1184 mic_module mod[2];
1197 mic_statistics micstats; 1185 mic_statistics micstats;
1198#endif
1199 HostRxDesc rxfids[MPI_MAX_FIDS]; // rx/tx/config MPI350 descriptors 1186 HostRxDesc rxfids[MPI_MAX_FIDS]; // rx/tx/config MPI350 descriptors
1200 HostTxDesc txfids[MPI_MAX_FIDS]; 1187 HostTxDesc txfids[MPI_MAX_FIDS];
1201 HostRidDesc config_desc; 1188 HostRidDesc config_desc;
@@ -1229,7 +1216,6 @@ static int flashgchar(struct airo_info *ai,int matchbyte,int dwelltime);
1229static int flashputbuf(struct airo_info *ai); 1216static int flashputbuf(struct airo_info *ai);
1230static int flashrestart(struct airo_info *ai,struct net_device *dev); 1217static int flashrestart(struct airo_info *ai,struct net_device *dev);
1231 1218
1232#ifdef MICSUPPORT
1233/*********************************************************************** 1219/***********************************************************************
1234 * MIC ROUTINES * 1220 * MIC ROUTINES *
1235 *********************************************************************** 1221 ***********************************************************************
@@ -1686,7 +1672,6 @@ static void emmh32_final(emmh32_context *context, u8 digest[4])
1686 digest[2] = (val>>8) & 0xFF; 1672 digest[2] = (val>>8) & 0xFF;
1687 digest[3] = val & 0xFF; 1673 digest[3] = val & 0xFF;
1688} 1674}
1689#endif
1690 1675
1691static int readBSSListRid(struct airo_info *ai, int first, 1676static int readBSSListRid(struct airo_info *ai, int first,
1692 BSSListRid *list) { 1677 BSSListRid *list) {
@@ -2005,7 +1990,6 @@ static int mpi_send_packet (struct net_device *dev)
2005 * Firmware automaticly puts 802 header on so 1990 * Firmware automaticly puts 802 header on so
2006 * we don't need to account for it in the length 1991 * we don't need to account for it in the length
2007 */ 1992 */
2008#ifdef MICSUPPORT
2009 if (test_bit(FLAG_MIC_CAPABLE, &ai->flags) && ai->micstats.enabled && 1993 if (test_bit(FLAG_MIC_CAPABLE, &ai->flags) && ai->micstats.enabled &&
2010 (ntohs(((u16 *)buffer)[6]) != 0x888E)) { 1994 (ntohs(((u16 *)buffer)[6]) != 0x888E)) {
2011 MICBuffer pMic; 1995 MICBuffer pMic;
@@ -2022,9 +2006,7 @@ static int mpi_send_packet (struct net_device *dev)
2022 memcpy (sendbuf, &pMic, sizeof(pMic)); 2006 memcpy (sendbuf, &pMic, sizeof(pMic));
2023 sendbuf += sizeof(pMic); 2007 sendbuf += sizeof(pMic);
2024 memcpy (sendbuf, buffer, len - sizeof(etherHead)); 2008 memcpy (sendbuf, buffer, len - sizeof(etherHead));
2025 } else 2009 } else {
2026#endif
2027 {
2028 *payloadLen = cpu_to_le16(len - sizeof(etherHead)); 2010 *payloadLen = cpu_to_le16(len - sizeof(etherHead));
2029 2011
2030 dev->trans_start = jiffies; 2012 dev->trans_start = jiffies;
@@ -2400,9 +2382,7 @@ void stop_airo_card( struct net_device *dev, int freeres )
2400 ai->shared, ai->shared_dma); 2382 ai->shared, ai->shared_dma);
2401 } 2383 }
2402 } 2384 }
2403#ifdef MICSUPPORT
2404 crypto_free_tfm(ai->tfm); 2385 crypto_free_tfm(ai->tfm);
2405#endif
2406 del_airo_dev( dev ); 2386 del_airo_dev( dev );
2407 free_netdev( dev ); 2387 free_netdev( dev );
2408} 2388}
@@ -2726,9 +2706,7 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
2726 ai->thr_pid = kernel_thread(airo_thread, dev, CLONE_FS | CLONE_FILES); 2706 ai->thr_pid = kernel_thread(airo_thread, dev, CLONE_FS | CLONE_FILES);
2727 if (ai->thr_pid < 0) 2707 if (ai->thr_pid < 0)
2728 goto err_out_free; 2708 goto err_out_free;
2729#ifdef MICSUPPORT
2730 ai->tfm = NULL; 2709 ai->tfm = NULL;
2731#endif
2732 rc = add_airo_dev( dev ); 2710 rc = add_airo_dev( dev );
2733 if (rc) 2711 if (rc)
2734 goto err_out_thr; 2712 goto err_out_thr;
@@ -2969,10 +2947,8 @@ static int airo_thread(void *data) {
2969 airo_read_wireless_stats(ai); 2947 airo_read_wireless_stats(ai);
2970 else if (test_bit(JOB_PROMISC, &ai->flags)) 2948 else if (test_bit(JOB_PROMISC, &ai->flags))
2971 airo_set_promisc(ai); 2949 airo_set_promisc(ai);
2972#ifdef MICSUPPORT
2973 else if (test_bit(JOB_MIC, &ai->flags)) 2950 else if (test_bit(JOB_MIC, &ai->flags))
2974 micinit(ai); 2951 micinit(ai);
2975#endif
2976 else if (test_bit(JOB_EVENT, &ai->flags)) 2952 else if (test_bit(JOB_EVENT, &ai->flags))
2977 airo_send_event(dev); 2953 airo_send_event(dev);
2978 else if (test_bit(JOB_AUTOWEP, &ai->flags)) 2954 else if (test_bit(JOB_AUTOWEP, &ai->flags))
@@ -3010,12 +2986,10 @@ static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs)
3010 2986
3011 if ( status & EV_MIC ) { 2987 if ( status & EV_MIC ) {
3012 OUT4500( apriv, EVACK, EV_MIC ); 2988 OUT4500( apriv, EVACK, EV_MIC );
3013#ifdef MICSUPPORT
3014 if (test_bit(FLAG_MIC_CAPABLE, &apriv->flags)) { 2989 if (test_bit(FLAG_MIC_CAPABLE, &apriv->flags)) {
3015 set_bit(JOB_MIC, &apriv->flags); 2990 set_bit(JOB_MIC, &apriv->flags);
3016 wake_up_interruptible(&apriv->thr_wait); 2991 wake_up_interruptible(&apriv->thr_wait);
3017 } 2992 }
3018#endif
3019 } 2993 }
3020 if ( status & EV_LINK ) { 2994 if ( status & EV_LINK ) {
3021 union iwreq_data wrqu; 2995 union iwreq_data wrqu;
@@ -3194,11 +3168,8 @@ static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs)
3194 } 3168 }
3195 bap_read (apriv, buffer + hdrlen/2, len, BAP0); 3169 bap_read (apriv, buffer + hdrlen/2, len, BAP0);
3196 } else { 3170 } else {
3197#ifdef MICSUPPORT
3198 MICBuffer micbuf; 3171 MICBuffer micbuf;
3199#endif
3200 bap_read (apriv, buffer, ETH_ALEN*2, BAP0); 3172 bap_read (apriv, buffer, ETH_ALEN*2, BAP0);
3201#ifdef MICSUPPORT
3202 if (apriv->micstats.enabled) { 3173 if (apriv->micstats.enabled) {
3203 bap_read (apriv,(u16*)&micbuf,sizeof(micbuf),BAP0); 3174 bap_read (apriv,(u16*)&micbuf,sizeof(micbuf),BAP0);
3204 if (ntohs(micbuf.typelen) > 0x05DC) 3175 if (ntohs(micbuf.typelen) > 0x05DC)
@@ -3211,15 +3182,10 @@ static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs)
3211 skb_trim (skb, len + hdrlen); 3182 skb_trim (skb, len + hdrlen);
3212 } 3183 }
3213 } 3184 }
3214#endif
3215 bap_read(apriv,buffer+ETH_ALEN,len,BAP0); 3185 bap_read(apriv,buffer+ETH_ALEN,len,BAP0);
3216#ifdef MICSUPPORT
3217 if (decapsulate(apriv,&micbuf,(etherHead*)buffer,len)) { 3186 if (decapsulate(apriv,&micbuf,(etherHead*)buffer,len)) {
3218badmic: 3187badmic:
3219 dev_kfree_skb_irq (skb); 3188 dev_kfree_skb_irq (skb);
3220#else
3221 if (0) {
3222#endif
3223badrx: 3189badrx:
3224 OUT4500( apriv, EVACK, EV_RX); 3190 OUT4500( apriv, EVACK, EV_RX);
3225 goto exitrx; 3191 goto exitrx;
@@ -3430,10 +3396,8 @@ static void mpi_receive_802_3(struct airo_info *ai)
3430 int len = 0; 3396 int len = 0;
3431 struct sk_buff *skb; 3397 struct sk_buff *skb;
3432 char *buffer; 3398 char *buffer;
3433#ifdef MICSUPPORT
3434 int off = 0; 3399 int off = 0;
3435 MICBuffer micbuf; 3400 MICBuffer micbuf;
3436#endif
3437 3401
3438 memcpy_fromio(&rxd, ai->rxfids[0].card_ram_off, sizeof(rxd)); 3402 memcpy_fromio(&rxd, ai->rxfids[0].card_ram_off, sizeof(rxd));
3439 /* Make sure we got something */ 3403 /* Make sure we got something */
@@ -3448,7 +3412,6 @@ static void mpi_receive_802_3(struct airo_info *ai)
3448 goto badrx; 3412 goto badrx;
3449 } 3413 }
3450 buffer = skb_put(skb,len); 3414 buffer = skb_put(skb,len);
3451#ifdef MICSUPPORT
3452 memcpy(buffer, ai->rxfids[0].virtual_host_addr, ETH_ALEN * 2); 3415 memcpy(buffer, ai->rxfids[0].virtual_host_addr, ETH_ALEN * 2);
3453 if (ai->micstats.enabled) { 3416 if (ai->micstats.enabled) {
3454 memcpy(&micbuf, 3417 memcpy(&micbuf,
@@ -3470,9 +3433,6 @@ badmic:
3470 dev_kfree_skb_irq (skb); 3433 dev_kfree_skb_irq (skb);
3471 goto badrx; 3434 goto badrx;
3472 } 3435 }
3473#else
3474 memcpy(buffer, ai->rxfids[0].virtual_host_addr, len);
3475#endif
3476#ifdef WIRELESS_SPY 3436#ifdef WIRELESS_SPY
3477 if (ai->spy_data.spy_number > 0) { 3437 if (ai->spy_data.spy_number > 0) {
3478 char *sa; 3438 char *sa;
@@ -3689,13 +3649,11 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
3689 ai->config.authType = AUTH_OPEN; 3649 ai->config.authType = AUTH_OPEN;
3690 ai->config.modulation = MOD_CCK; 3650 ai->config.modulation = MOD_CCK;
3691 3651
3692#ifdef MICSUPPORT
3693 if ((cap_rid.len>=sizeof(cap_rid)) && (cap_rid.extSoftCap&1) && 3652 if ((cap_rid.len>=sizeof(cap_rid)) && (cap_rid.extSoftCap&1) &&
3694 (micsetup(ai) == SUCCESS)) { 3653 (micsetup(ai) == SUCCESS)) {
3695 ai->config.opmode |= MODE_MIC; 3654 ai->config.opmode |= MODE_MIC;
3696 set_bit(FLAG_MIC_CAPABLE, &ai->flags); 3655 set_bit(FLAG_MIC_CAPABLE, &ai->flags);
3697 } 3656 }
3698#endif
3699 3657
3700 /* Save off the MAC */ 3658 /* Save off the MAC */
3701 for( i = 0; i < ETH_ALEN; i++ ) { 3659 for( i = 0; i < ETH_ALEN; i++ ) {
@@ -4170,15 +4128,12 @@ static int transmit_802_3_packet(struct airo_info *ai, int len, char *pPacket)
4170 } 4128 }
4171 len -= ETH_ALEN * 2; 4129 len -= ETH_ALEN * 2;
4172 4130
4173#ifdef MICSUPPORT
4174 if (test_bit(FLAG_MIC_CAPABLE, &ai->flags) && ai->micstats.enabled && 4131 if (test_bit(FLAG_MIC_CAPABLE, &ai->flags) && ai->micstats.enabled &&
4175 (ntohs(((u16 *)pPacket)[6]) != 0x888E)) { 4132 (ntohs(((u16 *)pPacket)[6]) != 0x888E)) {
4176 if (encapsulate(ai,(etherHead *)pPacket,&pMic,len) != SUCCESS) 4133 if (encapsulate(ai,(etherHead *)pPacket,&pMic,len) != SUCCESS)
4177 return ERROR; 4134 return ERROR;
4178 miclen = sizeof(pMic); 4135 miclen = sizeof(pMic);
4179 } 4136 }
4180#endif
4181
4182 // packet is destination[6], source[6], payload[len-12] 4137 // packet is destination[6], source[6], payload[len-12]
4183 // write the payload length and dst/src/payload 4138 // write the payload length and dst/src/payload
4184 if (bap_setup(ai, txFid, 0x0036, BAP1) != SUCCESS) return ERROR; 4139 if (bap_setup(ai, txFid, 0x0036, BAP1) != SUCCESS) return ERROR;
@@ -5081,7 +5036,6 @@ static int set_wep_key(struct airo_info *ai, u16 index,
5081 wkr.len = sizeof(wkr); 5036 wkr.len = sizeof(wkr);
5082 wkr.kindex = 0xffff; 5037 wkr.kindex = 0xffff;
5083 wkr.mac[0] = (char)index; 5038 wkr.mac[0] = (char)index;
5084 if (perm) printk(KERN_INFO "Setting transmit key to %d\n", index);
5085 if (perm) ai->defindex = (char)index; 5039 if (perm) ai->defindex = (char)index;
5086 } else { 5040 } else {
5087// We are actually setting the key 5041// We are actually setting the key
@@ -5090,7 +5044,6 @@ static int set_wep_key(struct airo_info *ai, u16 index,
5090 wkr.klen = keylen; 5044 wkr.klen = keylen;
5091 memcpy( wkr.key, key, keylen ); 5045 memcpy( wkr.key, key, keylen );
5092 memcpy( wkr.mac, macaddr, ETH_ALEN ); 5046 memcpy( wkr.mac, macaddr, ETH_ALEN );
5093 printk(KERN_INFO "Setting key %d\n", index);
5094 } 5047 }
5095 5048
5096 if (perm) disable_MAC(ai, lock); 5049 if (perm) disable_MAC(ai, lock);
@@ -5801,11 +5754,13 @@ static int airo_set_wap(struct net_device *dev,
5801 Cmd cmd; 5754 Cmd cmd;
5802 Resp rsp; 5755 Resp rsp;
5803 APListRid APList_rid; 5756 APListRid APList_rid;
5804 static const unsigned char bcast[ETH_ALEN] = { 255, 255, 255, 255, 255, 255 }; 5757 static const u8 any[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
5758 static const u8 off[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
5805 5759
5806 if (awrq->sa_family != ARPHRD_ETHER) 5760 if (awrq->sa_family != ARPHRD_ETHER)
5807 return -EINVAL; 5761 return -EINVAL;
5808 else if (!memcmp(bcast, awrq->sa_data, ETH_ALEN)) { 5762 else if (!memcmp(any, awrq->sa_data, ETH_ALEN) ||
5763 !memcmp(off, awrq->sa_data, ETH_ALEN)) {
5809 memset(&cmd, 0, sizeof(cmd)); 5764 memset(&cmd, 0, sizeof(cmd));
5810 cmd.cmd=CMD_LOSE_SYNC; 5765 cmd.cmd=CMD_LOSE_SYNC;
5811 if (down_interruptible(&local->sem)) 5766 if (down_interruptible(&local->sem))
@@ -6296,6 +6251,272 @@ static int airo_get_encode(struct net_device *dev,
6296 6251
6297/*------------------------------------------------------------------*/ 6252/*------------------------------------------------------------------*/
6298/* 6253/*
6254 * Wireless Handler : set extended Encryption parameters
6255 */
6256static int airo_set_encodeext(struct net_device *dev,
6257 struct iw_request_info *info,
6258 union iwreq_data *wrqu,
6259 char *extra)
6260{
6261 struct airo_info *local = dev->priv;
6262 struct iw_point *encoding = &wrqu->encoding;
6263 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6264 CapabilityRid cap_rid; /* Card capability info */
6265 int perm = ( encoding->flags & IW_ENCODE_TEMP ? 0 : 1 );
6266 u16 currentAuthType = local->config.authType;
6267 int idx, key_len, alg = ext->alg, set_key = 1;
6268 wep_key_t key;
6269
6270 /* Is WEP supported ? */
6271 readCapabilityRid(local, &cap_rid, 1);
6272 /* Older firmware doesn't support this...
6273 if(!(cap_rid.softCap & 2)) {
6274 return -EOPNOTSUPP;
6275 } */
6276 readConfigRid(local, 1);
6277
6278 /* Determine and validate the key index */
6279 idx = encoding->flags & IW_ENCODE_INDEX;
6280 if (idx) {
6281 if (idx < 1 || idx > ((cap_rid.softCap & 0x80) ? 4:1))
6282 return -EINVAL;
6283 idx--;
6284 } else
6285 idx = get_wep_key(local, 0xffff);
6286
6287 if (encoding->flags & IW_ENCODE_DISABLED)
6288 alg = IW_ENCODE_ALG_NONE;
6289
6290 if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
6291 /* Only set transmit key index here, actual
6292 * key is set below if needed.
6293 */
6294 set_wep_key(local, idx, NULL, 0, perm, 1);
6295 set_key = ext->key_len > 0 ? 1 : 0;
6296 }
6297
6298 if (set_key) {
6299 /* Set the requested key first */
6300 memset(key.key, 0, MAX_KEY_SIZE);
6301 switch (alg) {
6302 case IW_ENCODE_ALG_NONE:
6303 key.len = 0;
6304 break;
6305 case IW_ENCODE_ALG_WEP:
6306 if (ext->key_len > MIN_KEY_SIZE) {
6307 key.len = MAX_KEY_SIZE;
6308 } else if (ext->key_len > 0) {
6309 key.len = MIN_KEY_SIZE;
6310 } else {
6311 return -EINVAL;
6312 }
6313 key_len = min (ext->key_len, key.len);
6314 memcpy(key.key, ext->key, key_len);
6315 break;
6316 default:
6317 return -EINVAL;
6318 }
6319 /* Send the key to the card */
6320 set_wep_key(local, idx, key.key, key.len, perm, 1);
6321 }
6322
6323 /* Read the flags */
6324 if(encoding->flags & IW_ENCODE_DISABLED)
6325 local->config.authType = AUTH_OPEN; // disable encryption
6326 if(encoding->flags & IW_ENCODE_RESTRICTED)
6327 local->config.authType = AUTH_SHAREDKEY; // Only Both
6328 if(encoding->flags & IW_ENCODE_OPEN)
6329 local->config.authType = AUTH_ENCRYPT; // Only Wep
6330 /* Commit the changes to flags if needed */
6331 if (local->config.authType != currentAuthType)
6332 set_bit (FLAG_COMMIT, &local->flags);
6333
6334 return -EINPROGRESS;
6335}
6336
6337
6338/*------------------------------------------------------------------*/
6339/*
6340 * Wireless Handler : get extended Encryption parameters
6341 */
6342static int airo_get_encodeext(struct net_device *dev,
6343 struct iw_request_info *info,
6344 union iwreq_data *wrqu,
6345 char *extra)
6346{
6347 struct airo_info *local = dev->priv;
6348 struct iw_point *encoding = &wrqu->encoding;
6349 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6350 CapabilityRid cap_rid; /* Card capability info */
6351 int idx, max_key_len;
6352
6353 /* Is it supported ? */
6354 readCapabilityRid(local, &cap_rid, 1);
6355 if(!(cap_rid.softCap & 2)) {
6356 return -EOPNOTSUPP;
6357 }
6358 readConfigRid(local, 1);
6359
6360 max_key_len = encoding->length - sizeof(*ext);
6361 if (max_key_len < 0)
6362 return -EINVAL;
6363
6364 idx = encoding->flags & IW_ENCODE_INDEX;
6365 if (idx) {
6366 if (idx < 1 || idx > ((cap_rid.softCap & 0x80) ? 4:1))
6367 return -EINVAL;
6368 idx--;
6369 } else
6370 idx = get_wep_key(local, 0xffff);
6371
6372 encoding->flags = idx + 1;
6373 memset(ext, 0, sizeof(*ext));
6374
6375 /* Check encryption mode */
6376 switch(local->config.authType) {
6377 case AUTH_ENCRYPT:
6378 encoding->flags = IW_ENCODE_ALG_WEP | IW_ENCODE_ENABLED;
6379 break;
6380 case AUTH_SHAREDKEY:
6381 encoding->flags = IW_ENCODE_ALG_WEP | IW_ENCODE_ENABLED;
6382 break;
6383 default:
6384 case AUTH_OPEN:
6385 encoding->flags = IW_ENCODE_ALG_NONE | IW_ENCODE_DISABLED;
6386 break;
6387 }
6388 /* We can't return the key, so set the proper flag and return zero */
6389 encoding->flags |= IW_ENCODE_NOKEY;
6390 memset(extra, 0, 16);
6391
6392 /* Copy the key to the user buffer */
6393 ext->key_len = get_wep_key(local, idx);
6394 if (ext->key_len > 16) {
6395 ext->key_len=0;
6396 }
6397
6398 return 0;
6399}
6400
6401
6402/*------------------------------------------------------------------*/
6403/*
6404 * Wireless Handler : set extended authentication parameters
6405 */
6406static int airo_set_auth(struct net_device *dev,
6407 struct iw_request_info *info,
6408 union iwreq_data *wrqu, char *extra)
6409{
6410 struct airo_info *local = dev->priv;
6411 struct iw_param *param = &wrqu->param;
6412 u16 currentAuthType = local->config.authType;
6413
6414 switch (param->flags & IW_AUTH_INDEX) {
6415 case IW_AUTH_WPA_VERSION:
6416 case IW_AUTH_CIPHER_PAIRWISE:
6417 case IW_AUTH_CIPHER_GROUP:
6418 case IW_AUTH_KEY_MGMT:
6419 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6420 case IW_AUTH_PRIVACY_INVOKED:
6421 /*
6422 * airo does not use these parameters
6423 */
6424 break;
6425
6426 case IW_AUTH_DROP_UNENCRYPTED:
6427 if (param->value) {
6428 /* Only change auth type if unencrypted */
6429 if (currentAuthType == AUTH_OPEN)
6430 local->config.authType = AUTH_ENCRYPT;
6431 } else {
6432 local->config.authType = AUTH_OPEN;
6433 }
6434
6435 /* Commit the changes to flags if needed */
6436 if (local->config.authType != currentAuthType)
6437 set_bit (FLAG_COMMIT, &local->flags);
6438 break;
6439
6440 case IW_AUTH_80211_AUTH_ALG: {
6441 /* FIXME: What about AUTH_OPEN? This API seems to
6442 * disallow setting our auth to AUTH_OPEN.
6443 */
6444 if (param->value & IW_AUTH_ALG_SHARED_KEY) {
6445 local->config.authType = AUTH_SHAREDKEY;
6446 } else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM) {
6447 local->config.authType = AUTH_ENCRYPT;
6448 } else
6449 return -EINVAL;
6450 break;
6451
6452 /* Commit the changes to flags if needed */
6453 if (local->config.authType != currentAuthType)
6454 set_bit (FLAG_COMMIT, &local->flags);
6455 }
6456
6457 case IW_AUTH_WPA_ENABLED:
6458 /* Silently accept disable of WPA */
6459 if (param->value > 0)
6460 return -EOPNOTSUPP;
6461 break;
6462
6463 default:
6464 return -EOPNOTSUPP;
6465 }
6466 return -EINPROGRESS;
6467}
6468
6469
6470/*------------------------------------------------------------------*/
6471/*
6472 * Wireless Handler : get extended authentication parameters
6473 */
6474static int airo_get_auth(struct net_device *dev,
6475 struct iw_request_info *info,
6476 union iwreq_data *wrqu, char *extra)
6477{
6478 struct airo_info *local = dev->priv;
6479 struct iw_param *param = &wrqu->param;
6480 u16 currentAuthType = local->config.authType;
6481
6482 switch (param->flags & IW_AUTH_INDEX) {
6483 case IW_AUTH_DROP_UNENCRYPTED:
6484 switch (currentAuthType) {
6485 case AUTH_SHAREDKEY:
6486 case AUTH_ENCRYPT:
6487 param->value = 1;
6488 break;
6489 default:
6490 param->value = 0;
6491 break;
6492 }
6493 break;
6494
6495 case IW_AUTH_80211_AUTH_ALG:
6496 switch (currentAuthType) {
6497 case AUTH_SHAREDKEY:
6498 param->value = IW_AUTH_ALG_SHARED_KEY;
6499 break;
6500 case AUTH_ENCRYPT:
6501 default:
6502 param->value = IW_AUTH_ALG_OPEN_SYSTEM;
6503 break;
6504 }
6505 break;
6506
6507 case IW_AUTH_WPA_ENABLED:
6508 param->value = 0;
6509 break;
6510
6511 default:
6512 return -EOPNOTSUPP;
6513 }
6514 return 0;
6515}
6516
6517
6518/*------------------------------------------------------------------*/
6519/*
6299 * Wireless Handler : set Tx-Power 6520 * Wireless Handler : set Tx-Power
6300 */ 6521 */
6301static int airo_set_txpow(struct net_device *dev, 6522static int airo_set_txpow(struct net_device *dev,
@@ -7050,6 +7271,15 @@ static const iw_handler airo_handler[] =
7050 (iw_handler) airo_get_encode, /* SIOCGIWENCODE */ 7271 (iw_handler) airo_get_encode, /* SIOCGIWENCODE */
7051 (iw_handler) airo_set_power, /* SIOCSIWPOWER */ 7272 (iw_handler) airo_set_power, /* SIOCSIWPOWER */
7052 (iw_handler) airo_get_power, /* SIOCGIWPOWER */ 7273 (iw_handler) airo_get_power, /* SIOCGIWPOWER */
7274 (iw_handler) NULL, /* -- hole -- */
7275 (iw_handler) NULL, /* -- hole -- */
7276 (iw_handler) NULL, /* SIOCSIWGENIE */
7277 (iw_handler) NULL, /* SIOCGIWGENIE */
7278 (iw_handler) airo_set_auth, /* SIOCSIWAUTH */
7279 (iw_handler) airo_get_auth, /* SIOCGIWAUTH */
7280 (iw_handler) airo_set_encodeext, /* SIOCSIWENCODEEXT */
7281 (iw_handler) airo_get_encodeext, /* SIOCGIWENCODEEXT */
7282 (iw_handler) NULL, /* SIOCSIWPMKSA */
7053}; 7283};
7054 7284
7055/* Note : don't describe AIROIDIFC and AIROOLDIDIFC in here. 7285/* Note : don't describe AIROIDIFC and AIROOLDIDIFC in here.
@@ -7270,13 +7500,11 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
7270 case AIROGSTAT: ridcode = RID_STATUS; break; 7500 case AIROGSTAT: ridcode = RID_STATUS; break;
7271 case AIROGSTATSD32: ridcode = RID_STATSDELTA; break; 7501 case AIROGSTATSD32: ridcode = RID_STATSDELTA; break;
7272 case AIROGSTATSC32: ridcode = RID_STATS; break; 7502 case AIROGSTATSC32: ridcode = RID_STATS; break;
7273#ifdef MICSUPPORT
7274 case AIROGMICSTATS: 7503 case AIROGMICSTATS:
7275 if (copy_to_user(comp->data, &ai->micstats, 7504 if (copy_to_user(comp->data, &ai->micstats,
7276 min((int)comp->len,(int)sizeof(ai->micstats)))) 7505 min((int)comp->len,(int)sizeof(ai->micstats))))
7277 return -EFAULT; 7506 return -EFAULT;
7278 return 0; 7507 return 0;
7279#endif
7280 case AIRORRID: ridcode = comp->ridnum; break; 7508 case AIRORRID: ridcode = comp->ridnum; break;
7281 default: 7509 default:
7282 return -EINVAL; 7510 return -EINVAL;
@@ -7308,9 +7536,7 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
7308static int writerids(struct net_device *dev, aironet_ioctl *comp) { 7536static int writerids(struct net_device *dev, aironet_ioctl *comp) {
7309 struct airo_info *ai = dev->priv; 7537 struct airo_info *ai = dev->priv;
7310 int ridcode; 7538 int ridcode;
7311#ifdef MICSUPPORT
7312 int enabled; 7539 int enabled;
7313#endif
7314 Resp rsp; 7540 Resp rsp;
7315 static int (* writer)(struct airo_info *, u16 rid, const void *, int, int); 7541 static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
7316 unsigned char *iobuf; 7542 unsigned char *iobuf;
@@ -7367,11 +7593,9 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
7367 7593
7368 PC4500_readrid(ai,RID_STATSDELTACLEAR,iobuf,RIDSIZE, 1); 7594 PC4500_readrid(ai,RID_STATSDELTACLEAR,iobuf,RIDSIZE, 1);
7369 7595
7370#ifdef MICSUPPORT
7371 enabled = ai->micstats.enabled; 7596 enabled = ai->micstats.enabled;
7372 memset(&ai->micstats,0,sizeof(ai->micstats)); 7597 memset(&ai->micstats,0,sizeof(ai->micstats));
7373 ai->micstats.enabled = enabled; 7598 ai->micstats.enabled = enabled;
7374#endif
7375 7599
7376 if (copy_to_user(comp->data, iobuf, 7600 if (copy_to_user(comp->data, iobuf,
7377 min((int)comp->len, (int)RIDSIZE))) { 7601 min((int)comp->len, (int)RIDSIZE))) {
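
airo_set_encodeext() and airo_get_encodeext() above both take a 1-based key index from the low bits of encoding->flags and fall back to the card's current transmit key when that field is zero. The sketch below restates just that index handling as stand-alone C; IW_ENCODE_INDEX is given its usual wireless.h value, and the default-key lookup is a made-up stand-in for the driver's get_wep_key() call.

#include <stdio.h>

#define IW_ENCODE_INDEX 0x00FF  /* low byte of the flags word carries the key index */

/* Stand-in for the driver's "which key is the current transmit key?" query. */
static int current_default_key(void)
{
        return 2;
}

/* Map a wireless-extensions flags word to a 0-based key slot, rejecting
 * out-of-range indices.  max_keys is 4 or 1 depending on what the firmware
 * advertises (cap_rid.softCap & 0x80 in the hunk above). */
static int pick_key_index(int flags, int max_keys)
{
        int idx = flags & IW_ENCODE_INDEX;

        if (idx) {
                if (idx < 1 || idx > max_keys)
                        return -1;              /* -EINVAL in the driver */
                return idx - 1;                 /* user space counts from 1 */
        }
        return current_default_key();           /* 0 means "use the current key" */
}

int main(void)
{
        printf("flags=3 -> slot %d\n", pick_key_index(3, 4));   /* slot 2 */
        printf("flags=0 -> slot %d\n", pick_key_index(0, 4));   /* current default */
        printf("flags=5 -> slot %d\n", pick_key_index(5, 4));   /* rejected */
        return 0;
}
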
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index dfc24016ba81..87afa6878f26 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -137,44 +137,6 @@ static struct {
137#define MAC_BOOT_COMPLETE 0x0010 // MAC boot has been completed 137#define MAC_BOOT_COMPLETE 0x0010 // MAC boot has been completed
138#define MAC_INIT_OK 0x0002 // MAC boot has been completed 138#define MAC_INIT_OK 0x0002 // MAC boot has been completed
139 139
140#define C80211_SUBTYPE_MGMT_ASS_REQUEST 0x00
141#define C80211_SUBTYPE_MGMT_ASS_RESPONSE 0x10
142#define C80211_SUBTYPE_MGMT_REASS_REQUEST 0x20
143#define C80211_SUBTYPE_MGMT_REASS_RESPONSE 0x30
144#define C80211_SUBTYPE_MGMT_ProbeRequest 0x40
145#define C80211_SUBTYPE_MGMT_ProbeResponse 0x50
146#define C80211_SUBTYPE_MGMT_BEACON 0x80
147#define C80211_SUBTYPE_MGMT_ATIM 0x90
148#define C80211_SUBTYPE_MGMT_DISASSOSIATION 0xA0
149#define C80211_SUBTYPE_MGMT_Authentication 0xB0
150#define C80211_SUBTYPE_MGMT_Deauthentication 0xC0
151
152#define C80211_MGMT_AAN_OPENSYSTEM 0x0000
153#define C80211_MGMT_AAN_SHAREDKEY 0x0001
154
155#define C80211_MGMT_CAPABILITY_ESS 0x0001 // see 802.11 p.58
156#define C80211_MGMT_CAPABILITY_IBSS 0x0002 // - " -
157#define C80211_MGMT_CAPABILITY_CFPollable 0x0004 // - " -
158#define C80211_MGMT_CAPABILITY_CFPollRequest 0x0008 // - " -
159#define C80211_MGMT_CAPABILITY_Privacy 0x0010 // - " -
160
161#define C80211_MGMT_SC_Success 0
162#define C80211_MGMT_SC_Unspecified 1
163#define C80211_MGMT_SC_SupportCapabilities 10
164#define C80211_MGMT_SC_ReassDenied 11
165#define C80211_MGMT_SC_AssDenied 12
166#define C80211_MGMT_SC_AuthAlgNotSupported 13
167#define C80211_MGMT_SC_AuthTransSeqNumError 14
168#define C80211_MGMT_SC_AuthRejectChallenge 15
169#define C80211_MGMT_SC_AuthRejectTimeout 16
170#define C80211_MGMT_SC_AssDeniedHandleAP 17
171#define C80211_MGMT_SC_AssDeniedBSSRate 18
172
173#define C80211_MGMT_ElementID_SSID 0
174#define C80211_MGMT_ElementID_SupportedRates 1
175#define C80211_MGMT_ElementID_ChallengeText 16
176#define C80211_MGMT_CAPABILITY_ShortPreamble 0x0020
177
178#define MIB_MAX_DATA_BYTES 212 140#define MIB_MAX_DATA_BYTES 212
179#define MIB_HEADER_SIZE 4 /* first four fields */ 141#define MIB_HEADER_SIZE 4 /* first four fields */
180 142
@@ -2835,7 +2797,7 @@ static void handle_beacon_probe(struct atmel_private *priv, u16 capability,
2835 u8 channel) 2797 u8 channel)
2836{ 2798{
2837 int rejoin = 0; 2799 int rejoin = 0;
2838 int new = capability & C80211_MGMT_CAPABILITY_ShortPreamble ? 2800 int new = capability & MFIE_TYPE_POWER_CONSTRAINT ?
2839 SHORT_PREAMBLE : LONG_PREAMBLE; 2801 SHORT_PREAMBLE : LONG_PREAMBLE;
2840 2802
2841 if (priv->preamble != new) { 2803 if (priv->preamble != new) {
@@ -2921,11 +2883,11 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
2921 memcpy(header.addr2, priv->dev->dev_addr, 6); 2883 memcpy(header.addr2, priv->dev->dev_addr, 6);
2922 memcpy(header.addr3, priv->CurrentBSSID, 6); 2884 memcpy(header.addr3, priv->CurrentBSSID, 6);
2923 2885
2924 body.capability = cpu_to_le16(C80211_MGMT_CAPABILITY_ESS); 2886 body.capability = cpu_to_le16(WLAN_CAPABILITY_ESS);
2925 if (priv->wep_is_on) 2887 if (priv->wep_is_on)
2926 body.capability |= cpu_to_le16(C80211_MGMT_CAPABILITY_Privacy); 2888 body.capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);
2927 if (priv->preamble == SHORT_PREAMBLE) 2889 if (priv->preamble == SHORT_PREAMBLE)
2928 body.capability |= cpu_to_le16(C80211_MGMT_CAPABILITY_ShortPreamble); 2890 body.capability |= cpu_to_le16(MFIE_TYPE_POWER_CONSTRAINT);
2929 2891
2930 body.listen_interval = cpu_to_le16(priv->listen_interval * priv->beacon_period); 2892 body.listen_interval = cpu_to_le16(priv->listen_interval * priv->beacon_period);
2931 2893
@@ -2939,10 +2901,10 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
2939 bodysize = 12 + priv->SSID_size; 2901 bodysize = 12 + priv->SSID_size;
2940 } 2902 }
2941 2903
2942 ssid_el_p[0] = C80211_MGMT_ElementID_SSID; 2904 ssid_el_p[0] = MFIE_TYPE_SSID;
2943 ssid_el_p[1] = priv->SSID_size; 2905 ssid_el_p[1] = priv->SSID_size;
2944 memcpy(ssid_el_p + 2, priv->SSID, priv->SSID_size); 2906 memcpy(ssid_el_p + 2, priv->SSID, priv->SSID_size);
2945 ssid_el_p[2 + priv->SSID_size] = C80211_MGMT_ElementID_SupportedRates; 2907 ssid_el_p[2 + priv->SSID_size] = MFIE_TYPE_RATES;
2946 ssid_el_p[3 + priv->SSID_size] = 4; /* len of suported rates */ 2908 ssid_el_p[3 + priv->SSID_size] = 4; /* len of suported rates */
2947 memcpy(ssid_el_p + 4 + priv->SSID_size, atmel_basic_rates, 4); 2909 memcpy(ssid_el_p + 4 + priv->SSID_size, atmel_basic_rates, 4);
2948 2910
@@ -3004,7 +2966,7 @@ static void store_bss_info(struct atmel_private *priv,
3004 u16 beacon_period, u8 channel, u8 rssi, u8 ssid_len, 2966 u16 beacon_period, u8 channel, u8 rssi, u8 ssid_len,
3005 u8 *ssid, int is_beacon) 2967 u8 *ssid, int is_beacon)
3006{ 2968{
3007 u8 *bss = capability & C80211_MGMT_CAPABILITY_ESS ? header->addr2 : header->addr3; 2969 u8 *bss = capability & WLAN_CAPABILITY_ESS ? header->addr2 : header->addr3;
3008 int i, index; 2970 int i, index;
3009 2971
3010 for (index = -1, i = 0; i < priv->BSS_list_entries; i++) 2972 for (index = -1, i = 0; i < priv->BSS_list_entries; i++)
@@ -3030,16 +2992,16 @@ static void store_bss_info(struct atmel_private *priv,
3030 2992
3031 priv->BSSinfo[index].channel = channel; 2993 priv->BSSinfo[index].channel = channel;
3032 priv->BSSinfo[index].beacon_period = beacon_period; 2994 priv->BSSinfo[index].beacon_period = beacon_period;
3033 priv->BSSinfo[index].UsingWEP = capability & C80211_MGMT_CAPABILITY_Privacy; 2995 priv->BSSinfo[index].UsingWEP = capability & WLAN_CAPABILITY_PRIVACY;
3034 memcpy(priv->BSSinfo[index].SSID, ssid, ssid_len); 2996 memcpy(priv->BSSinfo[index].SSID, ssid, ssid_len);
3035 priv->BSSinfo[index].SSIDsize = ssid_len; 2997 priv->BSSinfo[index].SSIDsize = ssid_len;
3036 2998
3037 if (capability & C80211_MGMT_CAPABILITY_IBSS) 2999 if (capability & WLAN_CAPABILITY_IBSS)
3038 priv->BSSinfo[index].BSStype = IW_MODE_ADHOC; 3000 priv->BSSinfo[index].BSStype = IW_MODE_ADHOC;
3039 else if (capability & C80211_MGMT_CAPABILITY_ESS) 3001 else if (capability & WLAN_CAPABILITY_ESS)
3040 priv->BSSinfo[index].BSStype =IW_MODE_INFRA; 3002 priv->BSSinfo[index].BSStype =IW_MODE_INFRA;
3041 3003
3042 priv->BSSinfo[index].preamble = capability & C80211_MGMT_CAPABILITY_ShortPreamble ? 3004 priv->BSSinfo[index].preamble = capability & MFIE_TYPE_POWER_CONSTRAINT ?
3043 SHORT_PREAMBLE : LONG_PREAMBLE; 3005 SHORT_PREAMBLE : LONG_PREAMBLE;
3044} 3006}
3045 3007
@@ -3050,7 +3012,7 @@ static void authenticate(struct atmel_private *priv, u16 frame_len)
3050 u16 trans_seq_no = le16_to_cpu(auth->trans_seq); 3012 u16 trans_seq_no = le16_to_cpu(auth->trans_seq);
3051 u16 system = le16_to_cpu(auth->alg); 3013 u16 system = le16_to_cpu(auth->alg);
3052 3014
3053 if (status == C80211_MGMT_SC_Success && !priv->wep_is_on) { 3015 if (status == WLAN_STATUS_SUCCESS && !priv->wep_is_on) {
3054 /* no WEP */ 3016 /* no WEP */
3055 if (priv->station_was_associated) { 3017 if (priv->station_was_associated) {
3056 atmel_enter_state(priv, STATION_STATE_REASSOCIATING); 3018 atmel_enter_state(priv, STATION_STATE_REASSOCIATING);
@@ -3063,19 +3025,19 @@ static void authenticate(struct atmel_private *priv, u16 frame_len)
3063 } 3025 }
3064 } 3026 }
3065 3027
3066 if (status == C80211_MGMT_SC_Success && priv->wep_is_on) { 3028 if (status == WLAN_STATUS_SUCCESS && priv->wep_is_on) {
3067 int should_associate = 0; 3029 int should_associate = 0;
3068 /* WEP */ 3030 /* WEP */
3069 if (trans_seq_no != priv->ExpectedAuthentTransactionSeqNum) 3031 if (trans_seq_no != priv->ExpectedAuthentTransactionSeqNum)
3070 return; 3032 return;
3071 3033
3072 if (system == C80211_MGMT_AAN_OPENSYSTEM) { 3034 if (system == WLAN_AUTH_OPEN) {
3073 if (trans_seq_no == 0x0002) { 3035 if (trans_seq_no == 0x0002) {
3074 should_associate = 1; 3036 should_associate = 1;
3075 } 3037 }
3076 } else if (system == C80211_MGMT_AAN_SHAREDKEY) { 3038 } else if (system == WLAN_AUTH_SHARED_KEY) {
3077 if (trans_seq_no == 0x0002 && 3039 if (trans_seq_no == 0x0002 &&
3078 auth->el_id == C80211_MGMT_ElementID_ChallengeText) { 3040 auth->el_id == MFIE_TYPE_CHALLENGE) {
3079 send_authentication_request(priv, system, auth->chall_text, auth->chall_text_len); 3041 send_authentication_request(priv, system, auth->chall_text, auth->chall_text_len);
3080 return; 3042 return;
3081 } else if (trans_seq_no == 0x0004) { 3043 } else if (trans_seq_no == 0x0004) {
@@ -3140,8 +3102,8 @@ static void associate(struct atmel_private *priv, u16 frame_len, u16 subtype)
3140 if (frame_len < 8 + rates_len) 3102 if (frame_len < 8 + rates_len)
3141 return; 3103 return;
3142 3104
3143 if (status == C80211_MGMT_SC_Success) { 3105 if (status == WLAN_STATUS_SUCCESS) {
3144 if (subtype == C80211_SUBTYPE_MGMT_ASS_RESPONSE) 3106 if (subtype == IEEE80211_STYPE_ASSOC_RESP)
3145 priv->AssociationRequestRetryCnt = 0; 3107 priv->AssociationRequestRetryCnt = 0;
3146 else 3108 else
3147 priv->ReAssociationRequestRetryCnt = 0; 3109 priv->ReAssociationRequestRetryCnt = 0;
@@ -3178,9 +3140,9 @@ static void associate(struct atmel_private *priv, u16 frame_len, u16 subtype)
3178 return; 3140 return;
3179 } 3141 }
3180 3142
3181 if (subtype == C80211_SUBTYPE_MGMT_ASS_RESPONSE && 3143 if (subtype == IEEE80211_STYPE_ASSOC_RESP &&
3182 status != C80211_MGMT_SC_AssDeniedBSSRate && 3144 status != WLAN_STATUS_ASSOC_DENIED_RATES &&
3183 status != C80211_MGMT_SC_SupportCapabilities && 3145 status != WLAN_STATUS_CAPS_UNSUPPORTED &&
3184 priv->AssociationRequestRetryCnt < MAX_ASSOCIATION_RETRIES) { 3146 priv->AssociationRequestRetryCnt < MAX_ASSOCIATION_RETRIES) {
3185 mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES); 3147 mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
3186 priv->AssociationRequestRetryCnt++; 3148 priv->AssociationRequestRetryCnt++;
@@ -3188,9 +3150,9 @@ static void associate(struct atmel_private *priv, u16 frame_len, u16 subtype)
3188 return; 3150 return;
3189 } 3151 }
3190 3152
3191 if (subtype == C80211_SUBTYPE_MGMT_REASS_RESPONSE && 3153 if (subtype == IEEE80211_STYPE_REASSOC_RESP &&
3192 status != C80211_MGMT_SC_AssDeniedBSSRate && 3154 status != WLAN_STATUS_ASSOC_DENIED_RATES &&
3193 status != C80211_MGMT_SC_SupportCapabilities && 3155 status != WLAN_STATUS_CAPS_UNSUPPORTED &&
3194 priv->AssociationRequestRetryCnt < MAX_ASSOCIATION_RETRIES) { 3156 priv->AssociationRequestRetryCnt < MAX_ASSOCIATION_RETRIES) {
3195 mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES); 3157 mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
3196 priv->ReAssociationRequestRetryCnt++; 3158 priv->ReAssociationRequestRetryCnt++;
@@ -3325,8 +3287,8 @@ static void atmel_management_frame(struct atmel_private *priv,
3325 3287
3326 subtype = le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_STYPE; 3288 subtype = le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_STYPE;
3327 switch (subtype) { 3289 switch (subtype) {
3328 case C80211_SUBTYPE_MGMT_BEACON: 3290 case IEEE80211_STYPE_BEACON:
3329 case C80211_SUBTYPE_MGMT_ProbeResponse: 3291 case IEEE80211_STYPE_PROBE_RESP:
3330 3292
3331 /* beacon frame has multiple variable-length fields - 3293 /* beacon frame has multiple variable-length fields -
3332 never let an engineer loose with a data structure design. */ 3294 never let an engineer loose with a data structure design. */
@@ -3384,19 +3346,19 @@ static void atmel_management_frame(struct atmel_private *priv,
3384 beacon_interval, channel, rssi, 3346 beacon_interval, channel, rssi,
3385 ssid_length, 3347 ssid_length,
3386 &beacon->rates_el_id, 3348 &beacon->rates_el_id,
3387 subtype == C80211_SUBTYPE_MGMT_BEACON); 3349 subtype == IEEE80211_STYPE_BEACON);
3388 } 3350 }
3389 break; 3351 break;
3390 3352
3391 case C80211_SUBTYPE_MGMT_Authentication: 3353 case IEEE80211_STYPE_AUTH:
3392 3354
3393 if (priv->station_state == STATION_STATE_AUTHENTICATING) 3355 if (priv->station_state == STATION_STATE_AUTHENTICATING)
3394 authenticate(priv, frame_len); 3356 authenticate(priv, frame_len);
3395 3357
3396 break; 3358 break;
3397 3359
3398 case C80211_SUBTYPE_MGMT_ASS_RESPONSE: 3360 case IEEE80211_STYPE_ASSOC_RESP:
3399 case C80211_SUBTYPE_MGMT_REASS_RESPONSE: 3361 case IEEE80211_STYPE_REASSOC_RESP:
3400 3362
3401 if (priv->station_state == STATION_STATE_ASSOCIATING || 3363 if (priv->station_state == STATION_STATE_ASSOCIATING ||
3402 priv->station_state == STATION_STATE_REASSOCIATING) 3364 priv->station_state == STATION_STATE_REASSOCIATING)
@@ -3404,7 +3366,7 @@ static void atmel_management_frame(struct atmel_private *priv,
3404 3366
3405 break; 3367 break;
3406 3368
3407 case C80211_SUBTYPE_MGMT_DISASSOSIATION: 3369 case IEEE80211_STYPE_DISASSOC:
3408 if (priv->station_is_associated && 3370 if (priv->station_is_associated &&
3409 priv->operating_mode == IW_MODE_INFRA && 3371 priv->operating_mode == IW_MODE_INFRA &&
3410 is_frame_from_current_bss(priv, header)) { 3372 is_frame_from_current_bss(priv, header)) {
@@ -3417,7 +3379,7 @@ static void atmel_management_frame(struct atmel_private *priv,
3417 3379
3418 break; 3380 break;
3419 3381
3420 case C80211_SUBTYPE_MGMT_Deauthentication: 3382 case IEEE80211_STYPE_DEAUTH:
3421 if (priv->operating_mode == IW_MODE_INFRA && 3383 if (priv->operating_mode == IW_MODE_INFRA &&
3422 is_frame_from_current_bss(priv, header)) { 3384 is_frame_from_current_bss(priv, header)) {
3423 priv->station_was_associated = 0; 3385 priv->station_was_associated = 0;
@@ -3453,12 +3415,12 @@ static void atmel_management_timer(u_long a)
3453 priv->AuthenticationRequestRetryCnt = 0; 3415 priv->AuthenticationRequestRetryCnt = 0;
3454 restart_search(priv); 3416 restart_search(priv);
3455 } else { 3417 } else {
3456 int auth = C80211_MGMT_AAN_OPENSYSTEM; 3418 int auth = WLAN_AUTH_OPEN;
3457 priv->AuthenticationRequestRetryCnt++; 3419 priv->AuthenticationRequestRetryCnt++;
3458 priv->CurrentAuthentTransactionSeqNum = 0x0001; 3420 priv->CurrentAuthentTransactionSeqNum = 0x0001;
3459 mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES); 3421 mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
3460 if (priv->wep_is_on && priv->exclude_unencrypted) 3422 if (priv->wep_is_on && priv->exclude_unencrypted)
3461 auth = C80211_MGMT_AAN_SHAREDKEY; 3423 auth = WLAN_AUTH_SHARED_KEY;
3462 send_authentication_request(priv, auth, NULL, 0); 3424 send_authentication_request(priv, auth, NULL, 0);
3463 } 3425 }
3464 break; 3426 break;
@@ -3558,14 +3520,14 @@ static void atmel_command_irq(struct atmel_private *priv)
3558 priv->station_was_associated = priv->station_is_associated; 3520 priv->station_was_associated = priv->station_is_associated;
3559 atmel_enter_state(priv, STATION_STATE_READY); 3521 atmel_enter_state(priv, STATION_STATE_READY);
3560 } else { 3522 } else {
3561 int auth = C80211_MGMT_AAN_OPENSYSTEM; 3523 int auth = WLAN_AUTH_OPEN;
3562 priv->AuthenticationRequestRetryCnt = 0; 3524 priv->AuthenticationRequestRetryCnt = 0;
3563 atmel_enter_state(priv, STATION_STATE_AUTHENTICATING); 3525 atmel_enter_state(priv, STATION_STATE_AUTHENTICATING);
3564 3526
3565 mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES); 3527 mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
3566 priv->CurrentAuthentTransactionSeqNum = 0x0001; 3528 priv->CurrentAuthentTransactionSeqNum = 0x0001;
3567 if (priv->wep_is_on && priv->exclude_unencrypted) 3529 if (priv->wep_is_on && priv->exclude_unencrypted)
3568 auth = C80211_MGMT_AAN_SHAREDKEY; 3530 auth = WLAN_AUTH_SHARED_KEY;
3569 send_authentication_request(priv, auth, NULL, 0); 3531 send_authentication_request(priv, auth, NULL, 0);
3570 } 3532 }
3571 return; 3533 return;
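The atmel.c hunks above finish converting the driver from its private C80211_* management constants to the shared definitions in <net/ieee80211.h> (WLAN_CAPABILITY_*, WLAN_STATUS_*, WLAN_AUTH_* and IEEE80211_STYPE_*); the control flow itself is unchanged. A condensed sketch of the resulting subtype dispatch follows; the constants and the authenticate()/associate() calls are taken from the hunks above, while the wrapper function, its parameters and the omitted state checks are simplified illustration, not the driver's real code.

#include <net/ieee80211.h>

/* Sketch: management-frame subtype dispatch with the shared constants.
 * frame_ctl is assumed to already be in CPU byte order; the handler
 * calls are condensed placeholders for the driver paths shown above. */
static void dispatch_mgmt_sketch(struct atmel_private *priv, u16 frame_ctl,
				 u16 frame_len)
{
	u16 subtype = frame_ctl & IEEE80211_FCTL_STYPE;

	switch (subtype) {
	case IEEE80211_STYPE_BEACON:
	case IEEE80211_STYPE_PROBE_RESP:
		/* parse the variable-length beacon/probe-response elements */
		break;
	case IEEE80211_STYPE_AUTH:
		authenticate(priv, frame_len);
		break;
	case IEEE80211_STYPE_ASSOC_RESP:
	case IEEE80211_STYPE_REASSOC_RESP:
		associate(priv, frame_len, subtype);
		break;
	case IEEE80211_STYPE_DISASSOC:
	case IEEE80211_STYPE_DEAUTH:
		/* forget the association and fall back to scanning */
		break;
	}
}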
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index 6290c9f7e939..72335c8eb97f 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 2
3 Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved. 3 Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
4 4
5 This program is free software; you can redistribute it and/or modify it 5 This program is free software; you can redistribute it and/or modify it
6 under the terms of version 2 of the GNU General Public License as 6 under the terms of version 2 of the GNU General Public License as
@@ -167,12 +167,12 @@ that only one external action is invoked at a time.
167 167
168#include "ipw2100.h" 168#include "ipw2100.h"
169 169
170#define IPW2100_VERSION "1.1.3" 170#define IPW2100_VERSION "git-1.2.2"
171 171
172#define DRV_NAME "ipw2100" 172#define DRV_NAME "ipw2100"
173#define DRV_VERSION IPW2100_VERSION 173#define DRV_VERSION IPW2100_VERSION
174#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2100 Network Driver" 174#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2100 Network Driver"
175#define DRV_COPYRIGHT "Copyright(c) 2003-2005 Intel Corporation" 175#define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
176 176
177/* Debugging stuff */ 177/* Debugging stuff */
178#ifdef CONFIG_IPW2100_DEBUG 178#ifdef CONFIG_IPW2100_DEBUG
@@ -1418,7 +1418,7 @@ static int ipw2100_enable_adapter(struct ipw2100_priv *priv)
1418 if (priv->status & STATUS_ENABLED) 1418 if (priv->status & STATUS_ENABLED)
1419 return 0; 1419 return 0;
1420 1420
1421 down(&priv->adapter_sem); 1421 mutex_lock(&priv->adapter_mutex);
1422 1422
1423 if (rf_kill_active(priv)) { 1423 if (rf_kill_active(priv)) {
1424 IPW_DEBUG_HC("Command aborted due to RF kill active.\n"); 1424 IPW_DEBUG_HC("Command aborted due to RF kill active.\n");
@@ -1444,7 +1444,7 @@ static int ipw2100_enable_adapter(struct ipw2100_priv *priv)
1444 } 1444 }
1445 1445
1446 fail_up: 1446 fail_up:
1447 up(&priv->adapter_sem); 1447 mutex_unlock(&priv->adapter_mutex);
1448 return err; 1448 return err;
1449} 1449}
1450 1450
@@ -1576,7 +1576,7 @@ static int ipw2100_disable_adapter(struct ipw2100_priv *priv)
1576 cancel_delayed_work(&priv->hang_check); 1576 cancel_delayed_work(&priv->hang_check);
1577 } 1577 }
1578 1578
1579 down(&priv->adapter_sem); 1579 mutex_lock(&priv->adapter_mutex);
1580 1580
1581 err = ipw2100_hw_send_command(priv, &cmd); 1581 err = ipw2100_hw_send_command(priv, &cmd);
1582 if (err) { 1582 if (err) {
@@ -1595,7 +1595,7 @@ static int ipw2100_disable_adapter(struct ipw2100_priv *priv)
1595 IPW_DEBUG_INFO("TODO: implement scan state machine\n"); 1595 IPW_DEBUG_INFO("TODO: implement scan state machine\n");
1596 1596
1597 fail_up: 1597 fail_up:
1598 up(&priv->adapter_sem); 1598 mutex_unlock(&priv->adapter_mutex);
1599 return err; 1599 return err;
1600} 1600}
1601 1601
@@ -1672,6 +1672,18 @@ static int ipw2100_start_scan(struct ipw2100_priv *priv)
1672 return err; 1672 return err;
1673} 1673}
1674 1674
1675static const struct ieee80211_geo ipw_geos[] = {
1676 { /* Restricted */
1677 "---",
1678 .bg_channels = 14,
1679 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
1680 {2427, 4}, {2432, 5}, {2437, 6},
1681 {2442, 7}, {2447, 8}, {2452, 9},
1682 {2457, 10}, {2462, 11}, {2467, 12},
1683 {2472, 13}, {2484, 14}},
1684 },
1685};
1686
1675static int ipw2100_up(struct ipw2100_priv *priv, int deferred) 1687static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
1676{ 1688{
1677 unsigned long flags; 1689 unsigned long flags;
@@ -1727,6 +1739,13 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
1727 goto exit; 1739 goto exit;
1728 } 1740 }
1729 1741
1742 /* Initialize the geo */
1743 if (ieee80211_set_geo(priv->ieee, &ipw_geos[0])) {
1744 printk(KERN_WARNING DRV_NAME "Could not set geo\n");
1745 return 0;
1746 }
1747 priv->ieee->freq_band = IEEE80211_24GHZ_BAND;
1748
1730 lock = LOCK_NONE; 1749 lock = LOCK_NONE;
1731 if (ipw2100_set_ordinal(priv, IPW_ORD_PERS_DB_LOCK, &lock, &ord_len)) { 1750 if (ipw2100_set_ordinal(priv, IPW_ORD_PERS_DB_LOCK, &lock, &ord_len)) {
1732 printk(KERN_ERR DRV_NAME 1751 printk(KERN_ERR DRV_NAME
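The ipw_geos[] table added above describes a single restricted 2.4 GHz regulatory set: fourteen b/g channels with their centre frequencies in MHz, installed during ipw2100_up() via ieee80211_set_geo(). For reference, the table follows the usual 802.11b/g channel plan; a small standalone sketch of that arithmetic (not driver code):

/* 2.4 GHz channel number -> centre frequency in MHz, matching the
 * ipw_geos[] table above: channels 1-13 are 5 MHz apart starting at
 * 2412 MHz, channel 14 sits apart at 2484 MHz. */
static int bg_channel_to_mhz(int channel)
{
	if (channel < 1 || channel > 14)
		return -1;	/* not a 2.4 GHz b/g channel */
	if (channel == 14)
		return 2484;
	return 2407 + 5 * channel;
}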
@@ -1869,7 +1888,7 @@ static void ipw2100_reset_adapter(struct ipw2100_priv *priv)
1869 priv->status |= STATUS_RESET_PENDING; 1888 priv->status |= STATUS_RESET_PENDING;
1870 spin_unlock_irqrestore(&priv->low_lock, flags); 1889 spin_unlock_irqrestore(&priv->low_lock, flags);
1871 1890
1872 down(&priv->action_sem); 1891 mutex_lock(&priv->action_mutex);
1873 /* stop timed checks so that they don't interfere with reset */ 1892 /* stop timed checks so that they don't interfere with reset */
1874 priv->stop_hang_check = 1; 1893 priv->stop_hang_check = 1;
1875 cancel_delayed_work(&priv->hang_check); 1894 cancel_delayed_work(&priv->hang_check);
@@ -1879,7 +1898,7 @@ static void ipw2100_reset_adapter(struct ipw2100_priv *priv)
1879 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL); 1898 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1880 1899
1881 ipw2100_up(priv, 0); 1900 ipw2100_up(priv, 0);
1882 up(&priv->action_sem); 1901 mutex_unlock(&priv->action_mutex);
1883 1902
1884} 1903}
1885 1904
@@ -2371,15 +2390,6 @@ static void isr_rx(struct ipw2100_priv *priv, int i,
2371 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n"); 2390 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
2372 return; 2391 return;
2373 } 2392 }
2374#ifdef CONFIG_IPW2100_MONITOR
2375 if (unlikely(priv->ieee->iw_mode == IW_MODE_MONITOR &&
2376 priv->config & CFG_CRC_CHECK &&
2377 status->flags & IPW_STATUS_FLAG_CRC_ERROR)) {
2378 IPW_DEBUG_RX("CRC error in packet. Dropping.\n");
2379 priv->ieee->stats.rx_errors++;
2380 return;
2381 }
2382#endif
2383 2393
2384 if (unlikely(priv->ieee->iw_mode != IW_MODE_MONITOR && 2394 if (unlikely(priv->ieee->iw_mode != IW_MODE_MONITOR &&
2385 !(priv->status & STATUS_ASSOCIATED))) { 2395 !(priv->status & STATUS_ASSOCIATED))) {
@@ -2427,6 +2437,89 @@ static void isr_rx(struct ipw2100_priv *priv, int i,
2427 priv->rx_queue.drv[i].host_addr = packet->dma_addr; 2437 priv->rx_queue.drv[i].host_addr = packet->dma_addr;
2428} 2438}
2429 2439
2440#ifdef CONFIG_IPW2100_MONITOR
2441
2442static void isr_rx_monitor(struct ipw2100_priv *priv, int i,
2443 struct ieee80211_rx_stats *stats)
2444{
2445 struct ipw2100_status *status = &priv->status_queue.drv[i];
2446 struct ipw2100_rx_packet *packet = &priv->rx_buffers[i];
2447
2448 /* Magic struct that slots into the radiotap header -- no reason
2449 * to build this manually element by element, we can write it much
2450 * more efficiently than we can parse it. ORDER MATTERS HERE */
2451 struct ipw_rt_hdr {
2452 struct ieee80211_radiotap_header rt_hdr;
2453 s8 rt_dbmsignal; /* signal in dbM, kluged to signed */
2454 } *ipw_rt;
2455
2456 IPW_DEBUG_RX("Handler...\n");
2457
2458 if (unlikely(status->frame_size > skb_tailroom(packet->skb) -
2459 sizeof(struct ipw_rt_hdr))) {
2460 IPW_DEBUG_INFO("%s: frame_size (%u) > skb_tailroom (%u)!"
2461 " Dropping.\n",
2462 priv->net_dev->name,
2463 status->frame_size,
2464 skb_tailroom(packet->skb));
2465 priv->ieee->stats.rx_errors++;
2466 return;
2467 }
2468
2469 if (unlikely(!netif_running(priv->net_dev))) {
2470 priv->ieee->stats.rx_errors++;
2471 priv->wstats.discard.misc++;
2472 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
2473 return;
2474 }
2475
2476 if (unlikely(priv->config & CFG_CRC_CHECK &&
2477 status->flags & IPW_STATUS_FLAG_CRC_ERROR)) {
2478 IPW_DEBUG_RX("CRC error in packet. Dropping.\n");
2479 priv->ieee->stats.rx_errors++;
2480 return;
2481 }
2482
2483 pci_unmap_single(priv->pci_dev, packet->dma_addr,
2484 sizeof(struct ipw2100_rx), PCI_DMA_FROMDEVICE);
2485 memmove(packet->skb->data + sizeof(struct ipw_rt_hdr),
2486 packet->skb->data, status->frame_size);
2487
2488 ipw_rt = (struct ipw_rt_hdr *) packet->skb->data;
2489
2490 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
2491 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
2492 ipw_rt->rt_hdr.it_len = sizeof(struct ipw_rt_hdr); /* total hdr+data */
2493
2494 ipw_rt->rt_hdr.it_present = 1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL;
2495
2496 ipw_rt->rt_dbmsignal = status->rssi + IPW2100_RSSI_TO_DBM;
2497
2498 skb_put(packet->skb, status->frame_size + sizeof(struct ipw_rt_hdr));
2499
2500 if (!ieee80211_rx(priv->ieee, packet->skb, stats)) {
2501 priv->ieee->stats.rx_errors++;
2502
2503 /* ieee80211_rx failed, so it didn't free the SKB */
2504 dev_kfree_skb_any(packet->skb);
2505 packet->skb = NULL;
2506 }
2507
2508 /* We need to allocate a new SKB and attach it to the RDB. */
2509 if (unlikely(ipw2100_alloc_skb(priv, packet))) {
2510 IPW_DEBUG_WARNING(
2511 "%s: Unable to allocate SKB onto RBD ring - disabling "
2512 "adapter.\n", priv->net_dev->name);
2513 /* TODO: schedule adapter shutdown */
2514 IPW_DEBUG_INFO("TODO: Shutdown adapter...\n");
2515 }
2516
2517 /* Update the RDB entry */
2518 priv->rx_queue.drv[i].host_addr = packet->dma_addr;
2519}
2520
2521#endif
2522
2430static int ipw2100_corruption_check(struct ipw2100_priv *priv, int i) 2523static int ipw2100_corruption_check(struct ipw2100_priv *priv, int i)
2431{ 2524{
2432 struct ipw2100_status *status = &priv->status_queue.drv[i]; 2525 struct ipw2100_status *status = &priv->status_queue.drv[i];
@@ -2558,7 +2651,7 @@ static void __ipw2100_rx_process(struct ipw2100_priv *priv)
2558 case P8023_DATA_VAL: 2651 case P8023_DATA_VAL:
2559#ifdef CONFIG_IPW2100_MONITOR 2652#ifdef CONFIG_IPW2100_MONITOR
2560 if (priv->ieee->iw_mode == IW_MODE_MONITOR) { 2653 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
2561 isr_rx(priv, i, &stats); 2654 isr_rx_monitor(priv, i, &stats);
2562 break; 2655 break;
2563 } 2656 }
2564#endif 2657#endif
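With the hunks above, monitor mode gains its own receive path: isr_rx_monitor() prepends a minimal radiotap header (version, pad, length, present bitmap and one dBm antenna-signal byte) to each captured frame before passing it to ieee80211_rx(), and ipw2100_switch_mode() now reports ARPHRD_IEEE80211_RADIOTAP so capture tools know to expect that header. A stripped-down sketch of the same header construction, mirroring the layout used above (values are written in host order, exactly as the hunk does):

#include <net/ieee80211_radiotap.h>

/* Sketch: the minimal radiotap header built by isr_rx_monitor() above,
 * carrying a single optional field (dBm antenna signal). */
struct ipw_rt_hdr_sketch {
	struct ieee80211_radiotap_header rt_hdr;
	s8 rt_dbmsignal;		/* signal strength in dBm */
};

static void fill_radiotap_sketch(struct ipw_rt_hdr_sketch *rt, s8 dbm)
{
	rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
	rt->rt_hdr.it_pad = 0;
	rt->rt_hdr.it_len = sizeof(*rt);	/* fixed header + fields */
	rt->rt_hdr.it_present = 1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL;
	rt->rt_dbmsignal = dbm;
}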
@@ -3750,7 +3843,7 @@ static ssize_t store_memory(struct device *d, struct device_attribute *attr,
3750 struct net_device *dev = priv->net_dev; 3843 struct net_device *dev = priv->net_dev;
3751 const char *p = buf; 3844 const char *p = buf;
3752 3845
3753 (void) dev; /* kill unused-var warning for debug-only code */ 3846 (void)dev; /* kill unused-var warning for debug-only code */
3754 3847
3755 if (count < 1) 3848 if (count < 1)
3756 return count; 3849 return count;
@@ -3863,7 +3956,7 @@ static int ipw2100_switch_mode(struct ipw2100_priv *priv, u32 mode)
3863#ifdef CONFIG_IPW2100_MONITOR 3956#ifdef CONFIG_IPW2100_MONITOR
3864 case IW_MODE_MONITOR: 3957 case IW_MODE_MONITOR:
3865 priv->last_mode = priv->ieee->iw_mode; 3958 priv->last_mode = priv->ieee->iw_mode;
3866 priv->net_dev->type = ARPHRD_IEEE80211; 3959 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
3867 break; 3960 break;
3868#endif /* CONFIG_IPW2100_MONITOR */ 3961#endif /* CONFIG_IPW2100_MONITOR */
3869 } 3962 }
@@ -4070,7 +4163,7 @@ static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
4070 unsigned long val; 4163 unsigned long val;
4071 char *p = buffer; 4164 char *p = buffer;
4072 4165
4073 (void) dev; /* kill unused-var warning for debug-only code */ 4166 (void)dev; /* kill unused-var warning for debug-only code */
4074 4167
4075 IPW_DEBUG_INFO("enter\n"); 4168 IPW_DEBUG_INFO("enter\n");
4076 4169
@@ -4119,7 +4212,7 @@ static int ipw_radio_kill_sw(struct ipw2100_priv *priv, int disable_radio)
4119 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n", 4212 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
4120 disable_radio ? "OFF" : "ON"); 4213 disable_radio ? "OFF" : "ON");
4121 4214
4122 down(&priv->action_sem); 4215 mutex_lock(&priv->action_mutex);
4123 4216
4124 if (disable_radio) { 4217 if (disable_radio) {
4125 priv->status |= STATUS_RF_KILL_SW; 4218 priv->status |= STATUS_RF_KILL_SW;
@@ -4137,7 +4230,7 @@ static int ipw_radio_kill_sw(struct ipw2100_priv *priv, int disable_radio)
4137 schedule_reset(priv); 4230 schedule_reset(priv);
4138 } 4231 }
4139 4232
4140 up(&priv->action_sem); 4233 mutex_unlock(&priv->action_mutex);
4141 return 1; 4234 return 1;
4142} 4235}
4143 4236
@@ -5107,12 +5200,13 @@ static int ipw2100_set_tx_power(struct ipw2100_priv *priv, u32 tx_power)
5107 .host_command_length = 4 5200 .host_command_length = 4
5108 }; 5201 };
5109 int err = 0; 5202 int err = 0;
5203 u32 tmp = tx_power;
5110 5204
5111 if (tx_power != IPW_TX_POWER_DEFAULT) 5205 if (tx_power != IPW_TX_POWER_DEFAULT)
5112 tx_power = (tx_power - IPW_TX_POWER_MIN_DBM) * 16 / 5206 tmp = (tx_power - IPW_TX_POWER_MIN_DBM) * 16 /
5113 (IPW_TX_POWER_MAX_DBM - IPW_TX_POWER_MIN_DBM); 5207 (IPW_TX_POWER_MAX_DBM - IPW_TX_POWER_MIN_DBM);
5114 5208
5115 cmd.host_command_parameters[0] = tx_power; 5209 cmd.host_command_parameters[0] = tmp;
5116 5210
5117 if (priv->ieee->iw_mode == IW_MODE_ADHOC) 5211 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
5118 err = ipw2100_hw_send_command(priv, &cmd); 5212 err = ipw2100_hw_send_command(priv, &cmd);
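The tx-power hunk above stops rescaling the tx_power argument in place: the dBm value is mapped onto the firmware's 0..16 scale in a temporary, so the caller's value stays intact and only the converted number goes into the host command. The conversion itself, isolated as a sketch (IPW_TX_POWER_MIN_DBM, IPW_TX_POWER_MAX_DBM and IPW_TX_POWER_DEFAULT are the ipw2100.h limits, used symbolically since their numeric values are not part of this diff):

/* Sketch: dBm -> firmware power units, as computed in the hunk above. */
static u32 tx_power_to_fw_units(u32 tx_power)
{
	if (tx_power == IPW_TX_POWER_DEFAULT)
		return tx_power;	/* sentinel: let firmware pick */
	return (tx_power - IPW_TX_POWER_MIN_DBM) * 16 /
	       (IPW_TX_POWER_MAX_DBM - IPW_TX_POWER_MIN_DBM);
}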
@@ -5365,9 +5459,12 @@ static int ipw2100_configure_security(struct ipw2100_priv *priv, int batch_mode)
5365 SEC_LEVEL_0, 0, 1); 5459 SEC_LEVEL_0, 0, 1);
5366 } else { 5460 } else {
5367 auth_mode = IPW_AUTH_OPEN; 5461 auth_mode = IPW_AUTH_OPEN;
5368 if ((priv->ieee->sec.flags & SEC_AUTH_MODE) && 5462 if (priv->ieee->sec.flags & SEC_AUTH_MODE) {
5369 (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) 5463 if (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)
5370 auth_mode = IPW_AUTH_SHARED; 5464 auth_mode = IPW_AUTH_SHARED;
5465 else if (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP)
5466 auth_mode = IPW_AUTH_LEAP_CISCO_ID;
5467 }
5371 5468
5372 sec_level = SEC_LEVEL_0; 5469 sec_level = SEC_LEVEL_0;
5373 if (priv->ieee->sec.flags & SEC_LEVEL) 5470 if (priv->ieee->sec.flags & SEC_LEVEL)
@@ -5437,7 +5534,7 @@ static void shim__set_security(struct net_device *dev,
5437 struct ipw2100_priv *priv = ieee80211_priv(dev); 5534 struct ipw2100_priv *priv = ieee80211_priv(dev);
5438 int i, force_update = 0; 5535 int i, force_update = 0;
5439 5536
5440 down(&priv->action_sem); 5537 mutex_lock(&priv->action_mutex);
5441 if (!(priv->status & STATUS_INITIALIZED)) 5538 if (!(priv->status & STATUS_INITIALIZED))
5442 goto done; 5539 goto done;
5443 5540
@@ -5510,7 +5607,7 @@ static void shim__set_security(struct net_device *dev,
5510 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) 5607 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
5511 ipw2100_configure_security(priv, 0); 5608 ipw2100_configure_security(priv, 0);
5512 done: 5609 done:
5513 up(&priv->action_sem); 5610 mutex_unlock(&priv->action_mutex);
5514} 5611}
5515 5612
5516static int ipw2100_adapter_setup(struct ipw2100_priv *priv) 5613static int ipw2100_adapter_setup(struct ipw2100_priv *priv)
@@ -5634,7 +5731,7 @@ static int ipw2100_set_address(struct net_device *dev, void *p)
5634 if (!is_valid_ether_addr(addr->sa_data)) 5731 if (!is_valid_ether_addr(addr->sa_data))
5635 return -EADDRNOTAVAIL; 5732 return -EADDRNOTAVAIL;
5636 5733
5637 down(&priv->action_sem); 5734 mutex_lock(&priv->action_mutex);
5638 5735
5639 priv->config |= CFG_CUSTOM_MAC; 5736 priv->config |= CFG_CUSTOM_MAC;
5640 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); 5737 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
@@ -5644,12 +5741,12 @@ static int ipw2100_set_address(struct net_device *dev, void *p)
5644 goto done; 5741 goto done;
5645 5742
5646 priv->reset_backoff = 0; 5743 priv->reset_backoff = 0;
5647 up(&priv->action_sem); 5744 mutex_unlock(&priv->action_mutex);
5648 ipw2100_reset_adapter(priv); 5745 ipw2100_reset_adapter(priv);
5649 return 0; 5746 return 0;
5650 5747
5651 done: 5748 done:
5652 up(&priv->action_sem); 5749 mutex_unlock(&priv->action_mutex);
5653 return err; 5750 return err;
5654} 5751}
5655 5752
@@ -5760,6 +5857,9 @@ static int ipw2100_wpa_set_auth_algs(struct ipw2100_priv *priv, int value)
5760 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) { 5857 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
5761 sec.auth_mode = WLAN_AUTH_OPEN; 5858 sec.auth_mode = WLAN_AUTH_OPEN;
5762 ieee->open_wep = 1; 5859 ieee->open_wep = 1;
5860 } else if (value & IW_AUTH_ALG_LEAP) {
5861 sec.auth_mode = WLAN_AUTH_LEAP;
5862 ieee->open_wep = 1;
5763 } else 5863 } else
5764 return -EINVAL; 5864 return -EINVAL;
5765 5865
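Together with the IPW_AUTH_LEAP and IPW_AUTH_LEAP_CISCO_ID definitions added to ipw2100.h further down, the two hunks above add LEAP to the authentication plumbing: IW_AUTH_ALG_LEAP from wireless extensions is recorded as WLAN_AUTH_LEAP in the ieee80211 security state, and ipw2100_configure_security() later maps that onto the firmware's IPW_AUTH_LEAP_CISCO_ID mode. A condensed sketch of the two mappings, using only constants that appear in this patch or in the wireless-extensions headers; error handling is trimmed:

/* Sketch: WE auth algorithm -> ieee80211 auth mode -> firmware auth mode. */
static int we_alg_to_ieee_sketch(int value)
{
	if (value & IW_AUTH_ALG_SHARED_KEY)
		return WLAN_AUTH_SHARED_KEY;
	if (value & IW_AUTH_ALG_OPEN_SYSTEM)
		return WLAN_AUTH_OPEN;
	if (value & IW_AUTH_ALG_LEAP)
		return WLAN_AUTH_LEAP;
	return -EINVAL;
}

static int ieee_to_fw_auth_sketch(int auth_mode)
{
	if (auth_mode == WLAN_AUTH_SHARED_KEY)
		return IPW_AUTH_SHARED;
	if (auth_mode == WLAN_AUTH_LEAP)
		return IPW_AUTH_LEAP_CISCO_ID;
	return IPW_AUTH_OPEN;
}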
@@ -5771,8 +5871,8 @@ static int ipw2100_wpa_set_auth_algs(struct ipw2100_priv *priv, int value)
5771 return ret; 5871 return ret;
5772} 5872}
5773 5873
5774void ipw2100_wpa_assoc_frame(struct ipw2100_priv *priv, 5874static void ipw2100_wpa_assoc_frame(struct ipw2100_priv *priv,
5775 char *wpa_ie, int wpa_ie_len) 5875 char *wpa_ie, int wpa_ie_len)
5776{ 5876{
5777 5877
5778 struct ipw2100_wpa_assoc_frame frame; 5878 struct ipw2100_wpa_assoc_frame frame;
@@ -5989,8 +6089,8 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
5989 strcpy(priv->nick, "ipw2100"); 6089 strcpy(priv->nick, "ipw2100");
5990 6090
5991 spin_lock_init(&priv->low_lock); 6091 spin_lock_init(&priv->low_lock);
5992 sema_init(&priv->action_sem, 1); 6092 mutex_init(&priv->action_mutex);
5993 sema_init(&priv->adapter_sem, 1); 6093 mutex_init(&priv->adapter_mutex);
5994 6094
5995 init_waitqueue_head(&priv->wait_command_queue); 6095 init_waitqueue_head(&priv->wait_command_queue);
5996 6096
@@ -6155,7 +6255,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6155 * member to call a function that then just turns and calls ipw2100_up. 6255 * member to call a function that then just turns and calls ipw2100_up.
6156 * net_dev->init is called after name allocation but before the 6256 * net_dev->init is called after name allocation but before the
6157 * notifier chain is called */ 6257 * notifier chain is called */
6158 down(&priv->action_sem); 6258 mutex_lock(&priv->action_mutex);
6159 err = register_netdev(dev); 6259 err = register_netdev(dev);
6160 if (err) { 6260 if (err) {
6161 printk(KERN_WARNING DRV_NAME 6261 printk(KERN_WARNING DRV_NAME
@@ -6191,12 +6291,12 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6191 6291
6192 priv->status |= STATUS_INITIALIZED; 6292 priv->status |= STATUS_INITIALIZED;
6193 6293
6194 up(&priv->action_sem); 6294 mutex_unlock(&priv->action_mutex);
6195 6295
6196 return 0; 6296 return 0;
6197 6297
6198 fail_unlock: 6298 fail_unlock:
6199 up(&priv->action_sem); 6299 mutex_unlock(&priv->action_mutex);
6200 6300
6201 fail: 6301 fail:
6202 if (dev) { 6302 if (dev) {
@@ -6236,7 +6336,7 @@ static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev)
6236 struct net_device *dev; 6336 struct net_device *dev;
6237 6337
6238 if (priv) { 6338 if (priv) {
6239 down(&priv->action_sem); 6339 mutex_lock(&priv->action_mutex);
6240 6340
6241 priv->status &= ~STATUS_INITIALIZED; 6341 priv->status &= ~STATUS_INITIALIZED;
6242 6342
@@ -6251,9 +6351,9 @@ static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev)
6251 /* Take down the hardware */ 6351 /* Take down the hardware */
6252 ipw2100_down(priv); 6352 ipw2100_down(priv);
6253 6353
6254 /* Release the semaphore so that the network subsystem can 6354 /* Release the mutex so that the network subsystem can
6255 * complete any needed calls into the driver... */ 6355 * complete any needed calls into the driver... */
6256 up(&priv->action_sem); 6356 mutex_unlock(&priv->action_mutex);
6257 6357
6258 /* Unregister the device first - this results in close() 6358 /* Unregister the device first - this results in close()
6259 * being called if the device is open. If we free storage 6359 * being called if the device is open. If we free storage
@@ -6292,7 +6392,7 @@ static int ipw2100_suspend(struct pci_dev *pci_dev, pm_message_t state)
6292 6392
6293 IPW_DEBUG_INFO("%s: Going into suspend...\n", dev->name); 6393 IPW_DEBUG_INFO("%s: Going into suspend...\n", dev->name);
6294 6394
6295 down(&priv->action_sem); 6395 mutex_lock(&priv->action_mutex);
6296 if (priv->status & STATUS_INITIALIZED) { 6396 if (priv->status & STATUS_INITIALIZED) {
6297 /* Take down the device; powers it off, etc. */ 6397 /* Take down the device; powers it off, etc. */
6298 ipw2100_down(priv); 6398 ipw2100_down(priv);
@@ -6305,7 +6405,7 @@ static int ipw2100_suspend(struct pci_dev *pci_dev, pm_message_t state)
6305 pci_disable_device(pci_dev); 6405 pci_disable_device(pci_dev);
6306 pci_set_power_state(pci_dev, PCI_D3hot); 6406 pci_set_power_state(pci_dev, PCI_D3hot);
6307 6407
6308 up(&priv->action_sem); 6408 mutex_unlock(&priv->action_mutex);
6309 6409
6310 return 0; 6410 return 0;
6311} 6411}
@@ -6319,7 +6419,7 @@ static int ipw2100_resume(struct pci_dev *pci_dev)
6319 if (IPW2100_PM_DISABLED) 6419 if (IPW2100_PM_DISABLED)
6320 return 0; 6420 return 0;
6321 6421
6322 down(&priv->action_sem); 6422 mutex_lock(&priv->action_mutex);
6323 6423
6324 IPW_DEBUG_INFO("%s: Coming out of suspend...\n", dev->name); 6424 IPW_DEBUG_INFO("%s: Coming out of suspend...\n", dev->name);
6325 6425
@@ -6345,7 +6445,7 @@ static int ipw2100_resume(struct pci_dev *pci_dev)
6345 if (!(priv->status & STATUS_RF_KILL_SW)) 6445 if (!(priv->status & STATUS_RF_KILL_SW))
6346 ipw2100_up(priv, 0); 6446 ipw2100_up(priv, 0);
6347 6447
6348 up(&priv->action_sem); 6448 mutex_unlock(&priv->action_mutex);
6349 6449
6350 return 0; 6450 return 0;
6351} 6451}
@@ -6509,7 +6609,7 @@ static int ipw2100_wx_set_freq(struct net_device *dev,
6509 if (priv->ieee->iw_mode == IW_MODE_INFRA) 6609 if (priv->ieee->iw_mode == IW_MODE_INFRA)
6510 return -EOPNOTSUPP; 6610 return -EOPNOTSUPP;
6511 6611
6512 down(&priv->action_sem); 6612 mutex_lock(&priv->action_mutex);
6513 if (!(priv->status & STATUS_INITIALIZED)) { 6613 if (!(priv->status & STATUS_INITIALIZED)) {
6514 err = -EIO; 6614 err = -EIO;
6515 goto done; 6615 goto done;
@@ -6540,7 +6640,7 @@ static int ipw2100_wx_set_freq(struct net_device *dev,
6540 } 6640 }
6541 6641
6542 done: 6642 done:
6543 up(&priv->action_sem); 6643 mutex_unlock(&priv->action_mutex);
6544 return err; 6644 return err;
6545} 6645}
6546 6646
@@ -6581,7 +6681,7 @@ static int ipw2100_wx_set_mode(struct net_device *dev,
6581 if (wrqu->mode == priv->ieee->iw_mode) 6681 if (wrqu->mode == priv->ieee->iw_mode)
6582 return 0; 6682 return 0;
6583 6683
6584 down(&priv->action_sem); 6684 mutex_lock(&priv->action_mutex);
6585 if (!(priv->status & STATUS_INITIALIZED)) { 6685 if (!(priv->status & STATUS_INITIALIZED)) {
6586 err = -EIO; 6686 err = -EIO;
6587 goto done; 6687 goto done;
@@ -6604,7 +6704,7 @@ static int ipw2100_wx_set_mode(struct net_device *dev,
6604 } 6704 }
6605 6705
6606 done: 6706 done:
6607 up(&priv->action_sem); 6707 mutex_unlock(&priv->action_mutex);
6608 return err; 6708 return err;
6609} 6709}
6610 6710
@@ -6786,7 +6886,7 @@ static int ipw2100_wx_set_wap(struct net_device *dev,
6786 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER) 6886 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
6787 return -EINVAL; 6887 return -EINVAL;
6788 6888
6789 down(&priv->action_sem); 6889 mutex_lock(&priv->action_mutex);
6790 if (!(priv->status & STATUS_INITIALIZED)) { 6890 if (!(priv->status & STATUS_INITIALIZED)) {
6791 err = -EIO; 6891 err = -EIO;
6792 goto done; 6892 goto done;
@@ -6815,7 +6915,7 @@ static int ipw2100_wx_set_wap(struct net_device *dev,
6815 wrqu->ap_addr.sa_data[5] & 0xff); 6915 wrqu->ap_addr.sa_data[5] & 0xff);
6816 6916
6817 done: 6917 done:
6818 up(&priv->action_sem); 6918 mutex_unlock(&priv->action_mutex);
6819 return err; 6919 return err;
6820} 6920}
6821 6921
@@ -6851,7 +6951,7 @@ static int ipw2100_wx_set_essid(struct net_device *dev,
6851 int length = 0; 6951 int length = 0;
6852 int err = 0; 6952 int err = 0;
6853 6953
6854 down(&priv->action_sem); 6954 mutex_lock(&priv->action_mutex);
6855 if (!(priv->status & STATUS_INITIALIZED)) { 6955 if (!(priv->status & STATUS_INITIALIZED)) {
6856 err = -EIO; 6956 err = -EIO;
6857 goto done; 6957 goto done;
@@ -6888,7 +6988,7 @@ static int ipw2100_wx_set_essid(struct net_device *dev,
6888 err = ipw2100_set_essid(priv, essid, length, 0); 6988 err = ipw2100_set_essid(priv, essid, length, 0);
6889 6989
6890 done: 6990 done:
6891 up(&priv->action_sem); 6991 mutex_unlock(&priv->action_mutex);
6892 return err; 6992 return err;
6893} 6993}
6894 6994
@@ -6969,7 +7069,7 @@ static int ipw2100_wx_set_rate(struct net_device *dev,
6969 u32 rate; 7069 u32 rate;
6970 int err = 0; 7070 int err = 0;
6971 7071
6972 down(&priv->action_sem); 7072 mutex_lock(&priv->action_mutex);
6973 if (!(priv->status & STATUS_INITIALIZED)) { 7073 if (!(priv->status & STATUS_INITIALIZED)) {
6974 err = -EIO; 7074 err = -EIO;
6975 goto done; 7075 goto done;
@@ -6996,7 +7096,7 @@ static int ipw2100_wx_set_rate(struct net_device *dev,
6996 7096
6997 IPW_DEBUG_WX("SET Rate -> %04X \n", rate); 7097 IPW_DEBUG_WX("SET Rate -> %04X \n", rate);
6998 done: 7098 done:
6999 up(&priv->action_sem); 7099 mutex_unlock(&priv->action_mutex);
7000 return err; 7100 return err;
7001} 7101}
7002 7102
@@ -7016,7 +7116,7 @@ static int ipw2100_wx_get_rate(struct net_device *dev,
7016 return 0; 7116 return 0;
7017 } 7117 }
7018 7118
7019 down(&priv->action_sem); 7119 mutex_lock(&priv->action_mutex);
7020 if (!(priv->status & STATUS_INITIALIZED)) { 7120 if (!(priv->status & STATUS_INITIALIZED)) {
7021 err = -EIO; 7121 err = -EIO;
7022 goto done; 7122 goto done;
@@ -7048,7 +7148,7 @@ static int ipw2100_wx_get_rate(struct net_device *dev,
7048 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value); 7148 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
7049 7149
7050 done: 7150 done:
7051 up(&priv->action_sem); 7151 mutex_unlock(&priv->action_mutex);
7052 return err; 7152 return err;
7053} 7153}
7054 7154
@@ -7063,7 +7163,7 @@ static int ipw2100_wx_set_rts(struct net_device *dev,
7063 if (wrqu->rts.fixed == 0) 7163 if (wrqu->rts.fixed == 0)
7064 return -EINVAL; 7164 return -EINVAL;
7065 7165
7066 down(&priv->action_sem); 7166 mutex_lock(&priv->action_mutex);
7067 if (!(priv->status & STATUS_INITIALIZED)) { 7167 if (!(priv->status & STATUS_INITIALIZED)) {
7068 err = -EIO; 7168 err = -EIO;
7069 goto done; 7169 goto done;
@@ -7083,7 +7183,7 @@ static int ipw2100_wx_set_rts(struct net_device *dev,
7083 7183
7084 IPW_DEBUG_WX("SET RTS Threshold -> 0x%08X \n", value); 7184 IPW_DEBUG_WX("SET RTS Threshold -> 0x%08X \n", value);
7085 done: 7185 done:
7086 up(&priv->action_sem); 7186 mutex_unlock(&priv->action_mutex);
7087 return err; 7187 return err;
7088} 7188}
7089 7189
@@ -7134,7 +7234,7 @@ static int ipw2100_wx_set_txpow(struct net_device *dev,
7134 value = wrqu->txpower.value; 7234 value = wrqu->txpower.value;
7135 } 7235 }
7136 7236
7137 down(&priv->action_sem); 7237 mutex_lock(&priv->action_mutex);
7138 if (!(priv->status & STATUS_INITIALIZED)) { 7238 if (!(priv->status & STATUS_INITIALIZED)) {
7139 err = -EIO; 7239 err = -EIO;
7140 goto done; 7240 goto done;
@@ -7145,7 +7245,7 @@ static int ipw2100_wx_set_txpow(struct net_device *dev,
7145 IPW_DEBUG_WX("SET TX Power -> %d \n", value); 7245 IPW_DEBUG_WX("SET TX Power -> %d \n", value);
7146 7246
7147 done: 7247 done:
7148 up(&priv->action_sem); 7248 mutex_unlock(&priv->action_mutex);
7149 return err; 7249 return err;
7150} 7250}
7151 7251
@@ -7237,7 +7337,7 @@ static int ipw2100_wx_set_retry(struct net_device *dev,
7237 if (!(wrqu->retry.flags & IW_RETRY_LIMIT)) 7337 if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
7238 return 0; 7338 return 0;
7239 7339
7240 down(&priv->action_sem); 7340 mutex_lock(&priv->action_mutex);
7241 if (!(priv->status & STATUS_INITIALIZED)) { 7341 if (!(priv->status & STATUS_INITIALIZED)) {
7242 err = -EIO; 7342 err = -EIO;
7243 goto done; 7343 goto done;
@@ -7264,7 +7364,7 @@ static int ipw2100_wx_set_retry(struct net_device *dev,
7264 IPW_DEBUG_WX("SET Both Retry Limits -> %d \n", wrqu->retry.value); 7364 IPW_DEBUG_WX("SET Both Retry Limits -> %d \n", wrqu->retry.value);
7265 7365
7266 done: 7366 done:
7267 up(&priv->action_sem); 7367 mutex_unlock(&priv->action_mutex);
7268 return err; 7368 return err;
7269} 7369}
7270 7370
@@ -7307,7 +7407,7 @@ static int ipw2100_wx_set_scan(struct net_device *dev,
7307 struct ipw2100_priv *priv = ieee80211_priv(dev); 7407 struct ipw2100_priv *priv = ieee80211_priv(dev);
7308 int err = 0; 7408 int err = 0;
7309 7409
7310 down(&priv->action_sem); 7410 mutex_lock(&priv->action_mutex);
7311 if (!(priv->status & STATUS_INITIALIZED)) { 7411 if (!(priv->status & STATUS_INITIALIZED)) {
7312 err = -EIO; 7412 err = -EIO;
7313 goto done; 7413 goto done;
@@ -7322,7 +7422,7 @@ static int ipw2100_wx_set_scan(struct net_device *dev,
7322 } 7422 }
7323 7423
7324 done: 7424 done:
7325 up(&priv->action_sem); 7425 mutex_unlock(&priv->action_mutex);
7326 return err; 7426 return err;
7327} 7427}
7328 7428
@@ -7372,7 +7472,7 @@ static int ipw2100_wx_set_power(struct net_device *dev,
7372 struct ipw2100_priv *priv = ieee80211_priv(dev); 7472 struct ipw2100_priv *priv = ieee80211_priv(dev);
7373 int err = 0; 7473 int err = 0;
7374 7474
7375 down(&priv->action_sem); 7475 mutex_lock(&priv->action_mutex);
7376 if (!(priv->status & STATUS_INITIALIZED)) { 7476 if (!(priv->status & STATUS_INITIALIZED)) {
7377 err = -EIO; 7477 err = -EIO;
7378 goto done; 7478 goto done;
@@ -7405,7 +7505,7 @@ static int ipw2100_wx_set_power(struct net_device *dev,
7405 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode); 7505 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
7406 7506
7407 done: 7507 done:
7408 up(&priv->action_sem); 7508 mutex_unlock(&priv->action_mutex);
7409 return err; 7509 return err;
7410 7510
7411} 7511}
@@ -7709,7 +7809,7 @@ static int ipw2100_wx_set_promisc(struct net_device *dev,
7709 int enable = (parms[0] > 0); 7809 int enable = (parms[0] > 0);
7710 int err = 0; 7810 int err = 0;
7711 7811
7712 down(&priv->action_sem); 7812 mutex_lock(&priv->action_mutex);
7713 if (!(priv->status & STATUS_INITIALIZED)) { 7813 if (!(priv->status & STATUS_INITIALIZED)) {
7714 err = -EIO; 7814 err = -EIO;
7715 goto done; 7815 goto done;
@@ -7727,7 +7827,7 @@ static int ipw2100_wx_set_promisc(struct net_device *dev,
7727 err = ipw2100_switch_mode(priv, priv->last_mode); 7827 err = ipw2100_switch_mode(priv, priv->last_mode);
7728 } 7828 }
7729 done: 7829 done:
7730 up(&priv->action_sem); 7830 mutex_unlock(&priv->action_mutex);
7731 return err; 7831 return err;
7732} 7832}
7733 7833
@@ -7750,7 +7850,7 @@ static int ipw2100_wx_set_powermode(struct net_device *dev,
7750 struct ipw2100_priv *priv = ieee80211_priv(dev); 7850 struct ipw2100_priv *priv = ieee80211_priv(dev);
7751 int err = 0, mode = *(int *)extra; 7851 int err = 0, mode = *(int *)extra;
7752 7852
7753 down(&priv->action_sem); 7853 mutex_lock(&priv->action_mutex);
7754 if (!(priv->status & STATUS_INITIALIZED)) { 7854 if (!(priv->status & STATUS_INITIALIZED)) {
7755 err = -EIO; 7855 err = -EIO;
7756 goto done; 7856 goto done;
@@ -7762,7 +7862,7 @@ static int ipw2100_wx_set_powermode(struct net_device *dev,
7762 if (priv->power_mode != mode) 7862 if (priv->power_mode != mode)
7763 err = ipw2100_set_power_mode(priv, mode); 7863 err = ipw2100_set_power_mode(priv, mode);
7764 done: 7864 done:
7765 up(&priv->action_sem); 7865 mutex_unlock(&priv->action_mutex);
7766 return err; 7866 return err;
7767} 7867}
7768 7868
@@ -7814,7 +7914,7 @@ static int ipw2100_wx_set_preamble(struct net_device *dev,
7814 struct ipw2100_priv *priv = ieee80211_priv(dev); 7914 struct ipw2100_priv *priv = ieee80211_priv(dev);
7815 int err, mode = *(int *)extra; 7915 int err, mode = *(int *)extra;
7816 7916
7817 down(&priv->action_sem); 7917 mutex_lock(&priv->action_mutex);
7818 if (!(priv->status & STATUS_INITIALIZED)) { 7918 if (!(priv->status & STATUS_INITIALIZED)) {
7819 err = -EIO; 7919 err = -EIO;
7820 goto done; 7920 goto done;
@@ -7832,7 +7932,7 @@ static int ipw2100_wx_set_preamble(struct net_device *dev,
7832 err = ipw2100_system_config(priv, 0); 7932 err = ipw2100_system_config(priv, 0);
7833 7933
7834 done: 7934 done:
7835 up(&priv->action_sem); 7935 mutex_unlock(&priv->action_mutex);
7836 return err; 7936 return err;
7837} 7937}
7838 7938
@@ -7862,7 +7962,7 @@ static int ipw2100_wx_set_crc_check(struct net_device *dev,
7862 struct ipw2100_priv *priv = ieee80211_priv(dev); 7962 struct ipw2100_priv *priv = ieee80211_priv(dev);
7863 int err, mode = *(int *)extra; 7963 int err, mode = *(int *)extra;
7864 7964
7865 down(&priv->action_sem); 7965 mutex_lock(&priv->action_mutex);
7866 if (!(priv->status & STATUS_INITIALIZED)) { 7966 if (!(priv->status & STATUS_INITIALIZED)) {
7867 err = -EIO; 7967 err = -EIO;
7868 goto done; 7968 goto done;
@@ -7879,7 +7979,7 @@ static int ipw2100_wx_set_crc_check(struct net_device *dev,
7879 err = 0; 7979 err = 0;
7880 7980
7881 done: 7981 done:
7882 up(&priv->action_sem); 7982 mutex_unlock(&priv->action_mutex);
7883 return err; 7983 return err;
7884} 7984}
7885 7985
@@ -8184,11 +8284,11 @@ static void ipw2100_wx_event_work(struct ipw2100_priv *priv)
8184 if (priv->status & STATUS_STOPPING) 8284 if (priv->status & STATUS_STOPPING)
8185 return; 8285 return;
8186 8286
8187 down(&priv->action_sem); 8287 mutex_lock(&priv->action_mutex);
8188 8288
8189 IPW_DEBUG_WX("enter\n"); 8289 IPW_DEBUG_WX("enter\n");
8190 8290
8191 up(&priv->action_sem); 8291 mutex_unlock(&priv->action_mutex);
8192 8292
8193 wrqu.ap_addr.sa_family = ARPHRD_ETHER; 8293 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
8194 8294
@@ -8211,7 +8311,7 @@ static void ipw2100_wx_event_work(struct ipw2100_priv *priv)
8211 8311
8212 if (!(priv->status & STATUS_ASSOCIATED)) { 8312 if (!(priv->status & STATUS_ASSOCIATED)) {
8213 IPW_DEBUG_WX("Configuring ESSID\n"); 8313 IPW_DEBUG_WX("Configuring ESSID\n");
8214 down(&priv->action_sem); 8314 mutex_lock(&priv->action_mutex);
8215 /* This is a disassociation event, so kick the firmware to 8315 /* This is a disassociation event, so kick the firmware to
8216 * look for another AP */ 8316 * look for another AP */
8217 if (priv->config & CFG_STATIC_ESSID) 8317 if (priv->config & CFG_STATIC_ESSID)
@@ -8219,7 +8319,7 @@ static void ipw2100_wx_event_work(struct ipw2100_priv *priv)
8219 0); 8319 0);
8220 else 8320 else
8221 ipw2100_set_essid(priv, NULL, 0, 0); 8321 ipw2100_set_essid(priv, NULL, 0, 0);
8222 up(&priv->action_sem); 8322 mutex_unlock(&priv->action_mutex);
8223 } 8323 }
8224 8324
8225 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL); 8325 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
diff --git a/drivers/net/wireless/ipw2100.h b/drivers/net/wireless/ipw2100.h
index f6c51441fa87..55b7227198df 100644
--- a/drivers/net/wireless/ipw2100.h
+++ b/drivers/net/wireless/ipw2100.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 2
3 Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved. 3 Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
4 4
5 This program is free software; you can redistribute it and/or modify it 5 This program is free software; you can redistribute it and/or modify it
6 under the terms of version 2 of the GNU General Public License as 6 under the terms of version 2 of the GNU General Public License as
@@ -41,7 +41,12 @@
41 41
42#include <net/ieee80211.h> 42#include <net/ieee80211.h>
43 43
44#ifdef CONFIG_IPW2100_MONITOR
45#include <net/ieee80211_radiotap.h>
46#endif
47
44#include <linux/workqueue.h> 48#include <linux/workqueue.h>
49#include <linux/mutex.h>
45 50
46struct ipw2100_priv; 51struct ipw2100_priv;
47struct ipw2100_tx_packet; 52struct ipw2100_tx_packet;
@@ -392,8 +397,10 @@ struct ipw2100_notification {
392#define IPW_WEP104_CIPHER (1<<5) 397#define IPW_WEP104_CIPHER (1<<5)
393#define IPW_CKIP_CIPHER (1<<6) 398#define IPW_CKIP_CIPHER (1<<6)
394 399
395#define IPW_AUTH_OPEN 0 400#define IPW_AUTH_OPEN 0
396#define IPW_AUTH_SHARED 1 401#define IPW_AUTH_SHARED 1
402#define IPW_AUTH_LEAP 2
403#define IPW_AUTH_LEAP_CISCO_ID 0x80
397 404
398struct statistic { 405struct statistic {
399 int value; 406 int value;
@@ -588,8 +595,8 @@ struct ipw2100_priv {
588 int inta_other; 595 int inta_other;
589 596
590 spinlock_t low_lock; 597 spinlock_t low_lock;
591 struct semaphore action_sem; 598 struct mutex action_mutex;
592 struct semaphore adapter_sem; 599 struct mutex adapter_mutex;
593 600
594 wait_queue_head_t wait_command_queue; 601 wait_queue_head_t wait_command_queue;
595}; 602};
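The ipw2100.h hunk above completes the conversion scattered through ipw2100.c: the two binary semaphores (action_sem, adapter_sem) become struct mutex, and every down()/up() pair becomes mutex_lock()/mutex_unlock(). The locking discipline is unchanged; only the primitive is. A minimal sketch of the converted pattern, with the field and status-flag names taken from the hunks and the body reduced to a placeholder:

#include <linux/mutex.h>

/* Sketch of the post-conversion locking pattern used by the wx handlers:
 * take the action mutex, bail out early if the device is not initialized,
 * and always release on the common exit path. */
static int ipw2100_do_action_sketch(struct ipw2100_priv *priv)
{
	int err = 0;

	mutex_lock(&priv->action_mutex);
	if (!(priv->status & STATUS_INITIALIZED)) {
		err = -EIO;
		goto done;
	}
	/* ... perform the configuration change ... */
done:
	mutex_unlock(&priv->action_mutex);
	return err;
}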
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index 287676ad80df..9dce522526c5 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 2
3 Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved. 3 Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
4 4
5 802.11 status code portion of this file from ethereal-0.10.6: 5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB 6 Copyright 2000, Axis Communications AB
@@ -33,9 +33,9 @@
33#include "ipw2200.h" 33#include "ipw2200.h"
34#include <linux/version.h> 34#include <linux/version.h>
35 35
36#define IPW2200_VERSION "git-1.0.8" 36#define IPW2200_VERSION "git-1.1.1"
37#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver" 37#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
38#define DRV_COPYRIGHT "Copyright(c) 2003-2005 Intel Corporation" 38#define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
39#define DRV_VERSION IPW2200_VERSION 39#define DRV_VERSION IPW2200_VERSION
40 40
41#define ETH_P_80211_STATS (ETH_P_80211_RAW + 1) 41#define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
@@ -55,7 +55,9 @@ static int associate = 1;
55static int auto_create = 1; 55static int auto_create = 1;
56static int led = 0; 56static int led = 0;
57static int disable = 0; 57static int disable = 0;
58static int hwcrypto = 1; 58static int bt_coexist = 0;
59static int hwcrypto = 0;
60static int roaming = 1;
59static const char ipw_modes[] = { 61static const char ipw_modes[] = {
60 'a', 'b', 'g', '?' 62 'a', 'b', 'g', '?'
61}; 63};
@@ -151,12 +153,6 @@ static int init_supported_rates(struct ipw_priv *priv,
151static void ipw_set_hwcrypto_keys(struct ipw_priv *); 153static void ipw_set_hwcrypto_keys(struct ipw_priv *);
152static void ipw_send_wep_keys(struct ipw_priv *, int); 154static void ipw_send_wep_keys(struct ipw_priv *, int);
153 155
154static int ipw_is_valid_channel(struct ieee80211_device *, u8);
155static int ipw_channel_to_index(struct ieee80211_device *, u8);
156static u8 ipw_freq_to_channel(struct ieee80211_device *, u32);
157static int ipw_set_geo(struct ieee80211_device *, const struct ieee80211_geo *);
158static const struct ieee80211_geo *ipw_get_geo(struct ieee80211_device *);
159
160static int snprint_line(char *buf, size_t count, 156static int snprint_line(char *buf, size_t count,
161 const u8 * data, u32 len, u32 ofs) 157 const u8 * data, u32 len, u32 ofs)
162{ 158{
@@ -227,12 +223,15 @@ static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
227 return total; 223 return total;
228} 224}
229 225
226/* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
230static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg); 227static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
231#define ipw_read_reg32(a, b) _ipw_read_reg32(a, b) 228#define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
232 229
230/* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
233static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg); 231static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
234#define ipw_read_reg8(a, b) _ipw_read_reg8(a, b) 232#define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
235 233
234/* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
236static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value); 235static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
237static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c) 236static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
238{ 237{
@@ -241,6 +240,7 @@ static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
241 _ipw_write_reg8(a, b, c); 240 _ipw_write_reg8(a, b, c);
242} 241}
243 242
243/* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
244static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value); 244static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
245static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c) 245static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
246{ 246{
@@ -249,6 +249,7 @@ static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
249 _ipw_write_reg16(a, b, c); 249 _ipw_write_reg16(a, b, c);
250} 250}
251 251
252/* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
252static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value); 253static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
253static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c) 254static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
254{ 255{
@@ -257,48 +258,70 @@ static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
257 _ipw_write_reg32(a, b, c); 258 _ipw_write_reg32(a, b, c);
258} 259}
259 260
261/* 8-bit direct write (low 4K) */
260#define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs)) 262#define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))
263
264/* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
261#define ipw_write8(ipw, ofs, val) \ 265#define ipw_write8(ipw, ofs, val) \
262 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \ 266 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
263 _ipw_write8(ipw, ofs, val) 267 _ipw_write8(ipw, ofs, val)
264 268
269/* 16-bit direct write (low 4K) */
265#define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs)) 270#define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))
271
272/* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
266#define ipw_write16(ipw, ofs, val) \ 273#define ipw_write16(ipw, ofs, val) \
267 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \ 274 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
268 _ipw_write16(ipw, ofs, val) 275 _ipw_write16(ipw, ofs, val)
269 276
277/* 32-bit direct write (low 4K) */
270#define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs)) 278#define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs))
279
280/* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
271#define ipw_write32(ipw, ofs, val) \ 281#define ipw_write32(ipw, ofs, val) \
272 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \ 282 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
273 _ipw_write32(ipw, ofs, val) 283 _ipw_write32(ipw, ofs, val)
274 284
285/* 8-bit direct read (low 4K) */
275#define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs)) 286#define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs))
287
288/* 8-bit direct read (low 4K), with debug wrapper */
276static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs) 289static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
277{ 290{
278 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs)); 291 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs));
279 return _ipw_read8(ipw, ofs); 292 return _ipw_read8(ipw, ofs);
280} 293}
281 294
295/* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
282#define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs) 296#define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs)
283 297
298/* 16-bit direct read (low 4K) */
284#define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs)) 299#define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs))
300
301/* 16-bit direct read (low 4K), with debug wrapper */
285static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs) 302static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
286{ 303{
287 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs)); 304 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs));
288 return _ipw_read16(ipw, ofs); 305 return _ipw_read16(ipw, ofs);
289} 306}
290 307
308/* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
291#define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs) 309#define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs)
292 310
311/* 32-bit direct read (low 4K) */
293#define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs)) 312#define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs))
313
314/* 32-bit direct read (low 4K), with debug wrapper */
294static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs) 315static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
295{ 316{
296 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs)); 317 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs));
297 return _ipw_read32(ipw, ofs); 318 return _ipw_read32(ipw, ofs);
298} 319}
299 320
321/* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
300#define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs) 322#define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs)
301 323
324/* multi-byte read (above 4K), with debug wrapper */
302static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int); 325static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
303static inline void __ipw_read_indirect(const char *f, int l, 326static inline void __ipw_read_indirect(const char *f, int l,
304 struct ipw_priv *a, u32 b, u8 * c, int d) 327 struct ipw_priv *a, u32 b, u8 * c, int d)
@@ -308,15 +331,17 @@ static inline void __ipw_read_indirect(const char *f, int l,
308 _ipw_read_indirect(a, b, c, d); 331 _ipw_read_indirect(a, b, c, d);
309} 332}
310 333
334/* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
311#define ipw_read_indirect(a, b, c, d) __ipw_read_indirect(__FILE__, __LINE__, a, b, c, d) 335#define ipw_read_indirect(a, b, c, d) __ipw_read_indirect(__FILE__, __LINE__, a, b, c, d)
312 336
337/* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
313static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data, 338static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
314 int num); 339 int num);
315#define ipw_write_indirect(a, b, c, d) \ 340#define ipw_write_indirect(a, b, c, d) \
316 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \ 341 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
317 _ipw_write_indirect(a, b, c, d) 342 _ipw_write_indirect(a, b, c, d)
318 343
319/* indirect write s */ 344/* 32-bit indirect write (above 4K) */
320static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value) 345static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
321{ 346{
322 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value); 347 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
@@ -324,22 +349,29 @@ static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
324 _ipw_write32(priv, IPW_INDIRECT_DATA, value); 349 _ipw_write32(priv, IPW_INDIRECT_DATA, value);
325} 350}
326 351
352/* 8-bit indirect write (above 4K) */
327static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value) 353static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
328{ 354{
355 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
356 u32 dif_len = reg - aligned_addr;
357
329 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value); 358 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
330 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK); 359 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
331 _ipw_write8(priv, IPW_INDIRECT_DATA, value); 360 _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
332} 361}
333 362
363/* 16-bit indirect write (above 4K) */
334static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value) 364static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
335{ 365{
366 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
367 u32 dif_len = (reg - aligned_addr) & (~0x1ul);
368
336 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value); 369 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
337 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK); 370 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
338 _ipw_write16(priv, IPW_INDIRECT_DATA, value); 371 _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
339} 372}
340 373
341/* indirect read s */ 374/* 8-bit indirect read (above 4K) */
342
343static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg) 375static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
344{ 376{
345 u32 word; 377 u32 word;
@@ -349,6 +381,7 @@ static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
349 return (word >> ((reg & 0x3) * 8)) & 0xff; 381 return (word >> ((reg & 0x3) * 8)) & 0xff;
350} 382}
351 383
384/* 32-bit indirect read (above 4K) */
352static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg) 385static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
353{ 386{
354 u32 value; 387 u32 value;
@@ -361,11 +394,12 @@ static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
361 return value; 394 return value;
362} 395}
363 396
364/* iterative/auto-increment 32 bit reads and writes */ 397/* General purpose, no alignment requirement, iterative (multi-byte) read, */
398/* for area above 1st 4K of SRAM/reg space */
365static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf, 399static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
366 int num) 400 int num)
367{ 401{
368 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; 402 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
369 u32 dif_len = addr - aligned_addr; 403 u32 dif_len = addr - aligned_addr;
370 u32 i; 404 u32 i;
371 405
@@ -375,7 +409,7 @@ static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
375 return; 409 return;
376 } 410 }
377 411
378 /* Read the first nibble byte by byte */ 412 /* Read the first dword (or portion) byte by byte */
379 if (unlikely(dif_len)) { 413 if (unlikely(dif_len)) {
380 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); 414 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
381 /* Start reading at aligned_addr + dif_len */ 415 /* Start reading at aligned_addr + dif_len */
@@ -384,11 +418,12 @@ static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
384 aligned_addr += 4; 418 aligned_addr += 4;
385 } 419 }
386 420
421 /* Read all of the middle dwords as dwords, with auto-increment */
387 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr); 422 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
388 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4) 423 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
389 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA); 424 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
390 425
391 /* Copy the last nibble */ 426 /* Read the last dword (or portion) byte by byte */
392 if (unlikely(num)) { 427 if (unlikely(num)) {
393 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); 428 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
394 for (i = 0; num > 0; i++, num--) 429 for (i = 0; num > 0; i++, num--)
@@ -396,10 +431,12 @@ static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
396 } 431 }
397} 432}
398 433
434/* General purpose, no alignment requirement, iterative (multi-byte) write, */
435/* for area above 1st 4K of SRAM/reg space */
399static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf, 436static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
400 int num) 437 int num)
401{ 438{
402 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; 439 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
403 u32 dif_len = addr - aligned_addr; 440 u32 dif_len = addr - aligned_addr;
404 u32 i; 441 u32 i;
405 442
@@ -409,20 +446,21 @@ static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
409 return; 446 return;
410 } 447 }
411 448
412 /* Write the first nibble byte by byte */ 449 /* Write the first dword (or portion) byte by byte */
413 if (unlikely(dif_len)) { 450 if (unlikely(dif_len)) {
414 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); 451 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
415 /* Start reading at aligned_addr + dif_len */ 452 /* Start writing at aligned_addr + dif_len */
416 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++) 453 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
417 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf); 454 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
418 aligned_addr += 4; 455 aligned_addr += 4;
419 } 456 }
420 457
458 /* Write all of the middle dwords as dwords, with auto-increment */
421 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr); 459 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
422 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4) 460 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
423 _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf); 461 _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
424 462
425 /* Copy the last nibble */ 463 /* Write the last dword (or portion) byte by byte */
426 if (unlikely(num)) { 464 if (unlikely(num)) {
427 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); 465 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
428 for (i = 0; num > 0; i++, num--, buf++) 466 for (i = 0; num > 0; i++, num--, buf++)
@@ -430,17 +468,21 @@ static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
430 } 468 }
431} 469}
432 470
471/* General purpose, no alignment requirement, iterative (multi-byte) write, */
472/* for 1st 4K of SRAM/regs space */
433static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf, 473static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
434 int num) 474 int num)
435{ 475{
436 memcpy_toio((priv->hw_base + addr), buf, num); 476 memcpy_toio((priv->hw_base + addr), buf, num);
437} 477}
438 478
479/* Set bit(s) in low 4K of SRAM/regs */
439static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask) 480static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
440{ 481{
441 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask); 482 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
442} 483}
443 484
485/* Clear bit(s) in low 4K of SRAM/regs */
444static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask) 486static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
445{ 487{
446 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask); 488 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
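A minimal sketch of the indirect-access scheme used in the hunks above: IPW_INDIRECT_ADDR only accepts dword-aligned addresses, so an arbitrary (addr, len) transfer is split into a byte-wise head up to the next dword boundary, whole dwords through the auto-increment window, and a byte-wise tail. The hw_* helpers below are hypothetical stand-ins for the driver's MMIO accessors, not its real API.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define INDIRECT_ADDR_MASK (~0x3u)	/* mirrors IPW_INDIRECT_ADDR_MASK */

/* Hypothetical stand-ins for the MMIO window accessors. */
extern void     hw_set_indirect_addr(uint32_t aligned_addr);
extern uint8_t  hw_read_indirect_byte(uint32_t byte_offset);
extern void     hw_set_autoinc_addr(uint32_t aligned_addr);
extern uint32_t hw_read_autoinc_dword(void);

static void read_indirect(uint32_t addr, uint8_t *buf, size_t num)
{
	uint32_t aligned = addr & INDIRECT_ADDR_MASK;	/* dword align */
	uint32_t head = addr - aligned;			/* bytes into the first dword */
	uint32_t i;

	/* Head: partial first dword, byte by byte. */
	if (head) {
		hw_set_indirect_addr(aligned);
		for (i = head; i < 4 && num > 0; i++, num--, buf++)
			*buf = hw_read_indirect_byte(i);
		aligned += 4;
	}

	/* Middle: whole dwords through the auto-increment window. */
	hw_set_autoinc_addr(aligned);
	for (; num >= 4; buf += 4, aligned += 4, num -= 4) {
		uint32_t v = hw_read_autoinc_dword();
		memcpy(buf, &v, 4);
	}

	/* Tail: what is left of the last dword, byte by byte again. */
	hw_set_indirect_addr(aligned);
	for (i = 0; num > 0; i++, num--, buf++)
		*buf = hw_read_indirect_byte(i);
}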
@@ -701,7 +743,7 @@ static void ipw_init_ordinals(struct ipw_priv *priv)
701 743
702} 744}
703 745
704u32 ipw_register_toggle(u32 reg) 746static u32 ipw_register_toggle(u32 reg)
705{ 747{
706 reg &= ~IPW_START_STANDBY; 748 reg &= ~IPW_START_STANDBY;
707 if (reg & IPW_GATE_ODMA) 749 if (reg & IPW_GATE_ODMA)
@@ -722,11 +764,11 @@ u32 ipw_register_toggle(u32 reg)
722 * - On radio OFF, turn off any LEDs started during radio on 764 * - On radio OFF, turn off any LEDs started during radio on
723 * 765 *
724 */ 766 */
725#define LD_TIME_LINK_ON 300 767#define LD_TIME_LINK_ON msecs_to_jiffies(300)
726#define LD_TIME_LINK_OFF 2700 768#define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
727#define LD_TIME_ACT_ON 250 769#define LD_TIME_ACT_ON msecs_to_jiffies(250)
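The three LED intervals are now stored directly in jiffies so they can be handed to the timer/delayed-work APIs unchanged; msecs_to_jiffies(ms) is essentially ms * HZ / 1000 rounded up. Assuming HZ = 250 purely for illustration:

	/* LD_TIME_LINK_ON  = msecs_to_jiffies(300)  ->  75 jiffies */
	/* LD_TIME_LINK_OFF = msecs_to_jiffies(2700) -> 675 jiffies */
	/* LD_TIME_ACT_ON   = msecs_to_jiffies(250)  ->  63 jiffies (rounded up) */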
728 770
729void ipw_led_link_on(struct ipw_priv *priv) 771static void ipw_led_link_on(struct ipw_priv *priv)
730{ 772{
731 unsigned long flags; 773 unsigned long flags;
732 u32 led; 774 u32 led;
@@ -764,12 +806,12 @@ void ipw_led_link_on(struct ipw_priv *priv)
764static void ipw_bg_led_link_on(void *data) 806static void ipw_bg_led_link_on(void *data)
765{ 807{
766 struct ipw_priv *priv = data; 808 struct ipw_priv *priv = data;
767 down(&priv->sem); 809 mutex_lock(&priv->mutex);
768 ipw_led_link_on(data); 810 ipw_led_link_on(data);
769 up(&priv->sem); 811 mutex_unlock(&priv->mutex);
770} 812}
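The down()/up() to mutex_lock()/mutex_unlock() conversion repeated throughout this patch follows one shape in every ipw_bg_* wrapper: the work-queue callback takes the per-device lock around the real handler, which can then assume the lock is held. A small user-space model of that shape, with pthread_mutex_t standing in for the kernel's struct mutex:

#include <pthread.h>

struct dev_priv {
	pthread_mutex_t mutex;
	/* ... device state ... */
};

static void led_link_on(struct dev_priv *priv)
{
	/* real handler: caller must hold priv->mutex */
}

static void bg_led_link_on(void *data)
{
	struct dev_priv *priv = data;

	pthread_mutex_lock(&priv->mutex);
	led_link_on(priv);
	pthread_mutex_unlock(&priv->mutex);
}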
771 813
772void ipw_led_link_off(struct ipw_priv *priv) 814static void ipw_led_link_off(struct ipw_priv *priv)
773{ 815{
774 unsigned long flags; 816 unsigned long flags;
775 u32 led; 817 u32 led;
@@ -808,9 +850,9 @@ void ipw_led_link_off(struct ipw_priv *priv)
808static void ipw_bg_led_link_off(void *data) 850static void ipw_bg_led_link_off(void *data)
809{ 851{
810 struct ipw_priv *priv = data; 852 struct ipw_priv *priv = data;
811 down(&priv->sem); 853 mutex_lock(&priv->mutex);
812 ipw_led_link_off(data); 854 ipw_led_link_off(data);
813 up(&priv->sem); 855 mutex_unlock(&priv->mutex);
814} 856}
815 857
816static void __ipw_led_activity_on(struct ipw_priv *priv) 858static void __ipw_led_activity_on(struct ipw_priv *priv)
@@ -847,6 +889,7 @@ static void __ipw_led_activity_on(struct ipw_priv *priv)
847 } 889 }
848} 890}
849 891
892#if 0
850void ipw_led_activity_on(struct ipw_priv *priv) 893void ipw_led_activity_on(struct ipw_priv *priv)
851{ 894{
852 unsigned long flags; 895 unsigned long flags;
@@ -854,8 +897,9 @@ void ipw_led_activity_on(struct ipw_priv *priv)
854 __ipw_led_activity_on(priv); 897 __ipw_led_activity_on(priv);
855 spin_unlock_irqrestore(&priv->lock, flags); 898 spin_unlock_irqrestore(&priv->lock, flags);
856} 899}
900#endif /* 0 */
857 901
858void ipw_led_activity_off(struct ipw_priv *priv) 902static void ipw_led_activity_off(struct ipw_priv *priv)
859{ 903{
860 unsigned long flags; 904 unsigned long flags;
861 u32 led; 905 u32 led;
@@ -885,12 +929,12 @@ void ipw_led_activity_off(struct ipw_priv *priv)
885static void ipw_bg_led_activity_off(void *data) 929static void ipw_bg_led_activity_off(void *data)
886{ 930{
887 struct ipw_priv *priv = data; 931 struct ipw_priv *priv = data;
888 down(&priv->sem); 932 mutex_lock(&priv->mutex);
889 ipw_led_activity_off(data); 933 ipw_led_activity_off(data);
890 up(&priv->sem); 934 mutex_unlock(&priv->mutex);
891} 935}
892 936
893void ipw_led_band_on(struct ipw_priv *priv) 937static void ipw_led_band_on(struct ipw_priv *priv)
894{ 938{
895 unsigned long flags; 939 unsigned long flags;
896 u32 led; 940 u32 led;
@@ -925,7 +969,7 @@ void ipw_led_band_on(struct ipw_priv *priv)
925 spin_unlock_irqrestore(&priv->lock, flags); 969 spin_unlock_irqrestore(&priv->lock, flags);
926} 970}
927 971
928void ipw_led_band_off(struct ipw_priv *priv) 972static void ipw_led_band_off(struct ipw_priv *priv)
929{ 973{
930 unsigned long flags; 974 unsigned long flags;
931 u32 led; 975 u32 led;
@@ -948,24 +992,24 @@ void ipw_led_band_off(struct ipw_priv *priv)
948 spin_unlock_irqrestore(&priv->lock, flags); 992 spin_unlock_irqrestore(&priv->lock, flags);
949} 993}
950 994
951void ipw_led_radio_on(struct ipw_priv *priv) 995static void ipw_led_radio_on(struct ipw_priv *priv)
952{ 996{
953 ipw_led_link_on(priv); 997 ipw_led_link_on(priv);
954} 998}
955 999
956void ipw_led_radio_off(struct ipw_priv *priv) 1000static void ipw_led_radio_off(struct ipw_priv *priv)
957{ 1001{
958 ipw_led_activity_off(priv); 1002 ipw_led_activity_off(priv);
959 ipw_led_link_off(priv); 1003 ipw_led_link_off(priv);
960} 1004}
961 1005
962void ipw_led_link_up(struct ipw_priv *priv) 1006static void ipw_led_link_up(struct ipw_priv *priv)
963{ 1007{
964 /* Set the Link Led on for all nic types */ 1008 /* Set the Link Led on for all nic types */
965 ipw_led_link_on(priv); 1009 ipw_led_link_on(priv);
966} 1010}
967 1011
968void ipw_led_link_down(struct ipw_priv *priv) 1012static void ipw_led_link_down(struct ipw_priv *priv)
969{ 1013{
970 ipw_led_activity_off(priv); 1014 ipw_led_activity_off(priv);
971 ipw_led_link_off(priv); 1015 ipw_led_link_off(priv);
@@ -974,7 +1018,7 @@ void ipw_led_link_down(struct ipw_priv *priv)
974 ipw_led_radio_off(priv); 1018 ipw_led_radio_off(priv);
975} 1019}
976 1020
977void ipw_led_init(struct ipw_priv *priv) 1021static void ipw_led_init(struct ipw_priv *priv)
978{ 1022{
979 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE]; 1023 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
980 1024
@@ -1025,7 +1069,7 @@ void ipw_led_init(struct ipw_priv *priv)
1025 } 1069 }
1026} 1070}
1027 1071
1028void ipw_led_shutdown(struct ipw_priv *priv) 1072static void ipw_led_shutdown(struct ipw_priv *priv)
1029{ 1073{
1030 ipw_led_activity_off(priv); 1074 ipw_led_activity_off(priv);
1031 ipw_led_link_off(priv); 1075 ipw_led_link_off(priv);
@@ -1074,6 +1118,7 @@ static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1074 1118
1075static inline u32 ipw_get_event_log_len(struct ipw_priv *priv) 1119static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1076{ 1120{
1121 /* length = 1st dword in log */
1077 return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG)); 1122 return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1078} 1123}
1079 1124
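The new comment spells out the double indirection in ipw_get_event_log_len(): the IPW_EVENT_LOG register holds the SRAM address of the log, and the first dword stored at that address is the entry count. A simplified sketch, with read32()/read_reg32() as stand-ins for the driver's direct and indirect readers and the register offset passed in as a parameter to keep the example self-contained:

#include <stdint.h>

extern uint32_t read32(uint32_t reg);       /* direct access, low 4K */
extern uint32_t read_reg32(uint32_t addr);  /* indirect access, full SRAM */

static uint32_t event_log_len(uint32_t event_log_reg)
{
	uint32_t log_base = read32(event_log_reg);  /* SRAM address of the log */
	return read_reg32(log_base);                /* first dword there = length */
}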
@@ -1603,7 +1648,7 @@ static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1603 break; 1648 break;
1604 } 1649 }
1605 1650
1606 if (ipw_is_valid_channel(priv->ieee, channel)) 1651 if (ieee80211_is_valid_channel(priv->ieee, channel))
1607 priv->speed_scan[pos++] = channel; 1652 priv->speed_scan[pos++] = channel;
1608 else 1653 else
1609 IPW_WARNING("Skipping invalid channel request: %d\n", 1654 IPW_WARNING("Skipping invalid channel request: %d\n",
@@ -1751,9 +1796,9 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
1751 } 1796 }
1752 1797
1753 if (inta & IPW_INTA_BIT_FATAL_ERROR) { 1798 if (inta & IPW_INTA_BIT_FATAL_ERROR) {
1754 IPW_ERROR("Firmware error detected. Restarting.\n"); 1799 IPW_WARNING("Firmware error detected. Restarting.\n");
1755 if (priv->error) { 1800 if (priv->error) {
1756 IPW_ERROR("Sysfs 'error' log already exists.\n"); 1801 IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
1757#ifdef CONFIG_IPW2200_DEBUG 1802#ifdef CONFIG_IPW2200_DEBUG
1758 if (ipw_debug_level & IPW_DL_FW_ERRORS) { 1803 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
1759 struct ipw_fw_error *error = 1804 struct ipw_fw_error *error =
@@ -1766,10 +1811,10 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
1766 } else { 1811 } else {
1767 priv->error = ipw_alloc_error_log(priv); 1812 priv->error = ipw_alloc_error_log(priv);
1768 if (priv->error) 1813 if (priv->error)
1769 IPW_ERROR("Sysfs 'error' log captured.\n"); 1814 IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
1770 else 1815 else
1771 IPW_ERROR("Error allocating sysfs 'error' " 1816 IPW_DEBUG_FW("Error allocating sysfs 'error' "
1772 "log.\n"); 1817 "log.\n");
1773#ifdef CONFIG_IPW2200_DEBUG 1818#ifdef CONFIG_IPW2200_DEBUG
1774 if (ipw_debug_level & IPW_DL_FW_ERRORS) 1819 if (ipw_debug_level & IPW_DL_FW_ERRORS)
1775 ipw_dump_error_log(priv, priv->error); 1820 ipw_dump_error_log(priv, priv->error);
@@ -1870,7 +1915,8 @@ static char *get_cmd_string(u8 cmd)
1870} 1915}
1871 1916
1872#define HOST_COMPLETE_TIMEOUT HZ 1917#define HOST_COMPLETE_TIMEOUT HZ
1873static int ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd) 1918
1919static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
1874{ 1920{
1875 int rc = 0; 1921 int rc = 0;
1876 unsigned long flags; 1922 unsigned long flags;
@@ -1897,9 +1943,15 @@ static int ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
1897 IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n", 1943 IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
1898 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len, 1944 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
1899 priv->status); 1945 priv->status);
1900 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
1901 1946
1902 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, &cmd->param, cmd->len, 0); 1947#ifndef DEBUG_CMD_WEP_KEY
1948 if (cmd->cmd == IPW_CMD_WEP_KEY)
 1949			IPW_DEBUG_HC("WEP_KEY command masked out for security.\n");
1950 else
1951#endif
1952 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
1953
1954 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
1903 if (rc) { 1955 if (rc) {
1904 priv->status &= ~STATUS_HCMD_ACTIVE; 1956 priv->status &= ~STATUS_HCMD_ACTIVE;
1905 IPW_ERROR("Failed to send %s: Reason %d\n", 1957 IPW_ERROR("Failed to send %s: Reason %d\n",
@@ -1942,61 +1994,62 @@ static int ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
1942 return rc; 1994 return rc;
1943} 1995}
1944 1996
1945static int ipw_send_host_complete(struct ipw_priv *priv) 1997static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
1998{
1999 struct host_cmd cmd = {
2000 .cmd = command,
2001 };
2002
2003 return __ipw_send_cmd(priv, &cmd);
2004}
2005
2006static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2007 void *data)
1946{ 2008{
1947 struct host_cmd cmd = { 2009 struct host_cmd cmd = {
1948 .cmd = IPW_CMD_HOST_COMPLETE, 2010 .cmd = command,
1949 .len = 0 2011 .len = len,
2012 .param = data,
1950 }; 2013 };
1951 2014
2015 return __ipw_send_cmd(priv, &cmd);
2016}
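With these two helpers every sender of a fixed-size payload collapses to a single call and the memcpy into cmd.param disappears; for example, the RTS-threshold sender later in this patch ends up looking like this (simplified from the hunk at old line 2247):

static int send_rts_threshold(struct ipw_priv *priv, u16 rts)
{
	struct ipw_rts_threshold rts_threshold = {
		.rts_threshold = rts,
	};

	return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
				sizeof(rts_threshold), &rts_threshold);
}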
2017
2018static int ipw_send_host_complete(struct ipw_priv *priv)
2019{
1952 if (!priv) { 2020 if (!priv) {
1953 IPW_ERROR("Invalid args\n"); 2021 IPW_ERROR("Invalid args\n");
1954 return -1; 2022 return -1;
1955 } 2023 }
1956 2024
1957 return ipw_send_cmd(priv, &cmd); 2025 return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
1958} 2026}
1959 2027
1960static int ipw_send_system_config(struct ipw_priv *priv, 2028static int ipw_send_system_config(struct ipw_priv *priv,
1961 struct ipw_sys_config *config) 2029 struct ipw_sys_config *config)
1962{ 2030{
1963 struct host_cmd cmd = {
1964 .cmd = IPW_CMD_SYSTEM_CONFIG,
1965 .len = sizeof(*config)
1966 };
1967
1968 if (!priv || !config) { 2031 if (!priv || !config) {
1969 IPW_ERROR("Invalid args\n"); 2032 IPW_ERROR("Invalid args\n");
1970 return -1; 2033 return -1;
1971 } 2034 }
1972 2035
1973 memcpy(cmd.param, config, sizeof(*config)); 2036 return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG, sizeof(*config),
1974 return ipw_send_cmd(priv, &cmd); 2037 config);
1975} 2038}
1976 2039
1977static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len) 2040static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
1978{ 2041{
1979 struct host_cmd cmd = {
1980 .cmd = IPW_CMD_SSID,
1981 .len = min(len, IW_ESSID_MAX_SIZE)
1982 };
1983
1984 if (!priv || !ssid) { 2042 if (!priv || !ssid) {
1985 IPW_ERROR("Invalid args\n"); 2043 IPW_ERROR("Invalid args\n");
1986 return -1; 2044 return -1;
1987 } 2045 }
1988 2046
1989 memcpy(cmd.param, ssid, cmd.len); 2047 return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
1990 return ipw_send_cmd(priv, &cmd); 2048 ssid);
1991} 2049}
1992 2050
1993static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac) 2051static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
1994{ 2052{
1995 struct host_cmd cmd = {
1996 .cmd = IPW_CMD_ADAPTER_ADDRESS,
1997 .len = ETH_ALEN
1998 };
1999
2000 if (!priv || !mac) { 2053 if (!priv || !mac) {
2001 IPW_ERROR("Invalid args\n"); 2054 IPW_ERROR("Invalid args\n");
2002 return -1; 2055 return -1;
@@ -2005,8 +2058,7 @@ static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2005 IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n", 2058 IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n",
2006 priv->net_dev->name, MAC_ARG(mac)); 2059 priv->net_dev->name, MAC_ARG(mac));
2007 2060
2008 memcpy(cmd.param, mac, ETH_ALEN); 2061 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2009 return ipw_send_cmd(priv, &cmd);
2010} 2062}
2011 2063
2012/* 2064/*
@@ -2036,9 +2088,9 @@ static void ipw_adapter_restart(void *adapter)
2036static void ipw_bg_adapter_restart(void *data) 2088static void ipw_bg_adapter_restart(void *data)
2037{ 2089{
2038 struct ipw_priv *priv = data; 2090 struct ipw_priv *priv = data;
2039 down(&priv->sem); 2091 mutex_lock(&priv->mutex);
2040 ipw_adapter_restart(data); 2092 ipw_adapter_restart(data);
2041 up(&priv->sem); 2093 mutex_unlock(&priv->mutex);
2042} 2094}
2043 2095
2044#define IPW_SCAN_CHECK_WATCHDOG (5 * HZ) 2096#define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
@@ -2048,8 +2100,8 @@ static void ipw_scan_check(void *data)
2048 struct ipw_priv *priv = data; 2100 struct ipw_priv *priv = data;
2049 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) { 2101 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
2050 IPW_DEBUG_SCAN("Scan completion watchdog resetting " 2102 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2051 "adapter (%dms).\n", 2103 "adapter after (%dms).\n",
2052 IPW_SCAN_CHECK_WATCHDOG / 100); 2104 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2053 queue_work(priv->workqueue, &priv->adapter_restart); 2105 queue_work(priv->workqueue, &priv->adapter_restart);
2054 } 2106 }
2055} 2107}
@@ -2057,59 +2109,48 @@ static void ipw_scan_check(void *data)
2057static void ipw_bg_scan_check(void *data) 2109static void ipw_bg_scan_check(void *data)
2058{ 2110{
2059 struct ipw_priv *priv = data; 2111 struct ipw_priv *priv = data;
2060 down(&priv->sem); 2112 mutex_lock(&priv->mutex);
2061 ipw_scan_check(data); 2113 ipw_scan_check(data);
2062 up(&priv->sem); 2114 mutex_unlock(&priv->mutex);
2063} 2115}
2064 2116
2065static int ipw_send_scan_request_ext(struct ipw_priv *priv, 2117static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2066 struct ipw_scan_request_ext *request) 2118 struct ipw_scan_request_ext *request)
2067{ 2119{
2068 struct host_cmd cmd = { 2120 return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2069 .cmd = IPW_CMD_SCAN_REQUEST_EXT, 2121 sizeof(*request), request);
2070 .len = sizeof(*request)
2071 };
2072
2073 memcpy(cmd.param, request, sizeof(*request));
2074 return ipw_send_cmd(priv, &cmd);
2075} 2122}
2076 2123
2077static int ipw_send_scan_abort(struct ipw_priv *priv) 2124static int ipw_send_scan_abort(struct ipw_priv *priv)
2078{ 2125{
2079 struct host_cmd cmd = {
2080 .cmd = IPW_CMD_SCAN_ABORT,
2081 .len = 0
2082 };
2083
2084 if (!priv) { 2126 if (!priv) {
2085 IPW_ERROR("Invalid args\n"); 2127 IPW_ERROR("Invalid args\n");
2086 return -1; 2128 return -1;
2087 } 2129 }
2088 2130
2089 return ipw_send_cmd(priv, &cmd); 2131 return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2090} 2132}
2091 2133
2092static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens) 2134static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2093{ 2135{
2094 struct host_cmd cmd = { 2136 struct ipw_sensitivity_calib calib = {
2095 .cmd = IPW_CMD_SENSITIVITY_CALIB, 2137 .beacon_rssi_raw = sens,
2096 .len = sizeof(struct ipw_sensitivity_calib)
2097 }; 2138 };
2098 struct ipw_sensitivity_calib *calib = (struct ipw_sensitivity_calib *) 2139
2099 &cmd.param; 2140 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2100 calib->beacon_rssi_raw = sens; 2141 &calib);
2101 return ipw_send_cmd(priv, &cmd);
2102} 2142}
2103 2143
2104static int ipw_send_associate(struct ipw_priv *priv, 2144static int ipw_send_associate(struct ipw_priv *priv,
2105 struct ipw_associate *associate) 2145 struct ipw_associate *associate)
2106{ 2146{
2107 struct host_cmd cmd = {
2108 .cmd = IPW_CMD_ASSOCIATE,
2109 .len = sizeof(*associate)
2110 };
2111
2112 struct ipw_associate tmp_associate; 2147 struct ipw_associate tmp_associate;
2148
2149 if (!priv || !associate) {
2150 IPW_ERROR("Invalid args\n");
2151 return -1;
2152 }
2153
2113 memcpy(&tmp_associate, associate, sizeof(*associate)); 2154 memcpy(&tmp_associate, associate, sizeof(*associate));
2114 tmp_associate.policy_support = 2155 tmp_associate.policy_support =
2115 cpu_to_le16(tmp_associate.policy_support); 2156 cpu_to_le16(tmp_associate.policy_support);
@@ -2122,85 +2163,60 @@ static int ipw_send_associate(struct ipw_priv *priv,
2122 cpu_to_le16(tmp_associate.beacon_interval); 2163 cpu_to_le16(tmp_associate.beacon_interval);
2123 tmp_associate.atim_window = cpu_to_le16(tmp_associate.atim_window); 2164 tmp_associate.atim_window = cpu_to_le16(tmp_associate.atim_window);
2124 2165
2125 if (!priv || !associate) { 2166 return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(tmp_associate),
2126 IPW_ERROR("Invalid args\n"); 2167 &tmp_associate);
2127 return -1;
2128 }
2129
2130 memcpy(cmd.param, &tmp_associate, sizeof(*associate));
2131 return ipw_send_cmd(priv, &cmd);
2132} 2168}
2133 2169
2134static int ipw_send_supported_rates(struct ipw_priv *priv, 2170static int ipw_send_supported_rates(struct ipw_priv *priv,
2135 struct ipw_supported_rates *rates) 2171 struct ipw_supported_rates *rates)
2136{ 2172{
2137 struct host_cmd cmd = {
2138 .cmd = IPW_CMD_SUPPORTED_RATES,
2139 .len = sizeof(*rates)
2140 };
2141
2142 if (!priv || !rates) { 2173 if (!priv || !rates) {
2143 IPW_ERROR("Invalid args\n"); 2174 IPW_ERROR("Invalid args\n");
2144 return -1; 2175 return -1;
2145 } 2176 }
2146 2177
2147 memcpy(cmd.param, rates, sizeof(*rates)); 2178 return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2148 return ipw_send_cmd(priv, &cmd); 2179 rates);
2149} 2180}
2150 2181
2151static int ipw_set_random_seed(struct ipw_priv *priv) 2182static int ipw_set_random_seed(struct ipw_priv *priv)
2152{ 2183{
2153 struct host_cmd cmd = { 2184 u32 val;
2154 .cmd = IPW_CMD_SEED_NUMBER,
2155 .len = sizeof(u32)
2156 };
2157 2185
2158 if (!priv) { 2186 if (!priv) {
2159 IPW_ERROR("Invalid args\n"); 2187 IPW_ERROR("Invalid args\n");
2160 return -1; 2188 return -1;
2161 } 2189 }
2162 2190
2163 get_random_bytes(&cmd.param, sizeof(u32)); 2191 get_random_bytes(&val, sizeof(val));
2164 2192
2165 return ipw_send_cmd(priv, &cmd); 2193 return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2166} 2194}
2167 2195
2168static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off) 2196static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2169{ 2197{
2170 struct host_cmd cmd = {
2171 .cmd = IPW_CMD_CARD_DISABLE,
2172 .len = sizeof(u32)
2173 };
2174
2175 if (!priv) { 2198 if (!priv) {
2176 IPW_ERROR("Invalid args\n"); 2199 IPW_ERROR("Invalid args\n");
2177 return -1; 2200 return -1;
2178 } 2201 }
2179 2202
2180 *((u32 *) & cmd.param) = phy_off; 2203 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(phy_off),
2181 2204 &phy_off);
2182 return ipw_send_cmd(priv, &cmd);
2183} 2205}
2184 2206
2185static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power) 2207static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2186{ 2208{
2187 struct host_cmd cmd = {
2188 .cmd = IPW_CMD_TX_POWER,
2189 .len = sizeof(*power)
2190 };
2191
2192 if (!priv || !power) { 2209 if (!priv || !power) {
2193 IPW_ERROR("Invalid args\n"); 2210 IPW_ERROR("Invalid args\n");
2194 return -1; 2211 return -1;
2195 } 2212 }
2196 2213
2197 memcpy(cmd.param, power, sizeof(*power)); 2214 return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2198 return ipw_send_cmd(priv, &cmd);
2199} 2215}
2200 2216
2201static int ipw_set_tx_power(struct ipw_priv *priv) 2217static int ipw_set_tx_power(struct ipw_priv *priv)
2202{ 2218{
2203 const struct ieee80211_geo *geo = ipw_get_geo(priv->ieee); 2219 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
2204 struct ipw_tx_power tx_power; 2220 struct ipw_tx_power tx_power;
2205 s8 max_power; 2221 s8 max_power;
2206 int i; 2222 int i;
@@ -2247,18 +2263,14 @@ static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2247 struct ipw_rts_threshold rts_threshold = { 2263 struct ipw_rts_threshold rts_threshold = {
2248 .rts_threshold = rts, 2264 .rts_threshold = rts,
2249 }; 2265 };
2250 struct host_cmd cmd = {
2251 .cmd = IPW_CMD_RTS_THRESHOLD,
2252 .len = sizeof(rts_threshold)
2253 };
2254 2266
2255 if (!priv) { 2267 if (!priv) {
2256 IPW_ERROR("Invalid args\n"); 2268 IPW_ERROR("Invalid args\n");
2257 return -1; 2269 return -1;
2258 } 2270 }
2259 2271
2260 memcpy(cmd.param, &rts_threshold, sizeof(rts_threshold)); 2272 return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2261 return ipw_send_cmd(priv, &cmd); 2273 sizeof(rts_threshold), &rts_threshold);
2262} 2274}
2263 2275
2264static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag) 2276static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
@@ -2266,27 +2278,19 @@ static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2266 struct ipw_frag_threshold frag_threshold = { 2278 struct ipw_frag_threshold frag_threshold = {
2267 .frag_threshold = frag, 2279 .frag_threshold = frag,
2268 }; 2280 };
2269 struct host_cmd cmd = {
2270 .cmd = IPW_CMD_FRAG_THRESHOLD,
2271 .len = sizeof(frag_threshold)
2272 };
2273 2281
2274 if (!priv) { 2282 if (!priv) {
2275 IPW_ERROR("Invalid args\n"); 2283 IPW_ERROR("Invalid args\n");
2276 return -1; 2284 return -1;
2277 } 2285 }
2278 2286
2279 memcpy(cmd.param, &frag_threshold, sizeof(frag_threshold)); 2287 return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2280 return ipw_send_cmd(priv, &cmd); 2288 sizeof(frag_threshold), &frag_threshold);
2281} 2289}
2282 2290
2283static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode) 2291static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2284{ 2292{
2285 struct host_cmd cmd = { 2293 u32 param;
2286 .cmd = IPW_CMD_POWER_MODE,
2287 .len = sizeof(u32)
2288 };
2289 u32 *param = (u32 *) (&cmd.param);
2290 2294
2291 if (!priv) { 2295 if (!priv) {
2292 IPW_ERROR("Invalid args\n"); 2296 IPW_ERROR("Invalid args\n");
@@ -2297,17 +2301,18 @@ static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2297 * level */ 2301 * level */
2298 switch (mode) { 2302 switch (mode) {
2299 case IPW_POWER_BATTERY: 2303 case IPW_POWER_BATTERY:
2300 *param = IPW_POWER_INDEX_3; 2304 param = IPW_POWER_INDEX_3;
2301 break; 2305 break;
2302 case IPW_POWER_AC: 2306 case IPW_POWER_AC:
2303 *param = IPW_POWER_MODE_CAM; 2307 param = IPW_POWER_MODE_CAM;
2304 break; 2308 break;
2305 default: 2309 default:
2306 *param = mode; 2310 param = mode;
2307 break; 2311 break;
2308 } 2312 }
2309 2313
2310 return ipw_send_cmd(priv, &cmd); 2314 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2315 &param);
2311} 2316}
2312 2317
2313static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit) 2318static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
@@ -2316,18 +2321,14 @@ static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2316 .short_retry_limit = slimit, 2321 .short_retry_limit = slimit,
2317 .long_retry_limit = llimit 2322 .long_retry_limit = llimit
2318 }; 2323 };
2319 struct host_cmd cmd = {
2320 .cmd = IPW_CMD_RETRY_LIMIT,
2321 .len = sizeof(retry_limit)
2322 };
2323 2324
2324 if (!priv) { 2325 if (!priv) {
2325 IPW_ERROR("Invalid args\n"); 2326 IPW_ERROR("Invalid args\n");
2326 return -1; 2327 return -1;
2327 } 2328 }
2328 2329
2329 memcpy(cmd.param, &retry_limit, sizeof(retry_limit)); 2330 return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2330 return ipw_send_cmd(priv, &cmd); 2331 &retry_limit);
2331} 2332}
2332 2333
2333/* 2334/*
@@ -2454,7 +2455,7 @@ static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2454 /* 2455 /*
2455 If the data looks correct, then copy it to our private 2456 If the data looks correct, then copy it to our private
2456 copy. Otherwise let the firmware know to perform the operation 2457 copy. Otherwise let the firmware know to perform the operation
2457 on it's own 2458 on its own.
2458 */ 2459 */
2459 if (priv->eeprom[EEPROM_VERSION] != 0) { 2460 if (priv->eeprom[EEPROM_VERSION] != 0) {
2460 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n"); 2461 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
@@ -2707,22 +2708,25 @@ static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
2707 2708
2708static int ipw_fw_dma_wait(struct ipw_priv *priv) 2709static int ipw_fw_dma_wait(struct ipw_priv *priv)
2709{ 2710{
2710 u32 current_index = 0; 2711 u32 current_index = 0, previous_index;
2711 u32 watchdog = 0; 2712 u32 watchdog = 0;
2712 2713
2713 IPW_DEBUG_FW(">> : \n"); 2714 IPW_DEBUG_FW(">> : \n");
2714 2715
2715 current_index = ipw_fw_dma_command_block_index(priv); 2716 current_index = ipw_fw_dma_command_block_index(priv);
2716 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%8X\n", 2717 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2717 (int)priv->sram_desc.last_cb_index); 2718 (int)priv->sram_desc.last_cb_index);
2718 2719
2719 while (current_index < priv->sram_desc.last_cb_index) { 2720 while (current_index < priv->sram_desc.last_cb_index) {
2720 udelay(50); 2721 udelay(50);
2722 previous_index = current_index;
2721 current_index = ipw_fw_dma_command_block_index(priv); 2723 current_index = ipw_fw_dma_command_block_index(priv);
2722 2724
2723 watchdog++; 2725 if (previous_index < current_index) {
2724 2726 watchdog = 0;
2725 if (watchdog > 400) { 2727 continue;
2728 }
2729 if (++watchdog > 400) {
2726 IPW_DEBUG_FW_INFO("Timeout\n"); 2730 IPW_DEBUG_FW_INFO("Timeout\n");
2727 ipw_fw_dma_dump_command_block(priv); 2731 ipw_fw_dma_dump_command_block(priv);
2728 ipw_fw_dma_abort(priv); 2732 ipw_fw_dma_abort(priv);
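The rework above turns the DMA wait from a fixed 400-iteration cap into a progress-aware watchdog: the counter restarts whenever the command-block index advances, so only a genuinely stalled engine times out (roughly 400 * 50 us = 20 ms with no progress). A user-space model of the loop, with poll_index() as a hypothetical stand-in for ipw_fw_dma_command_block_index():

#include <stdint.h>
#include <unistd.h>

extern uint32_t poll_index(void);	/* hypothetical stand-in */

static int dma_wait(uint32_t last_index)
{
	uint32_t current_index = poll_index();
	uint32_t previous_index;
	unsigned int watchdog = 0;

	while (current_index < last_index) {
		usleep(50);			/* udelay(50) in the driver */
		previous_index = current_index;
		current_index = poll_index();

		if (previous_index < current_index) {
			watchdog = 0;		/* progress made: restart the watchdog */
			continue;
		}
		if (++watchdog > 400)
			return -1;		/* no progress for ~20 ms: give up */
	}
	return 0;
}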
@@ -2772,6 +2776,7 @@ static inline int ipw_alive(struct ipw_priv *priv)
2772 return ipw_read32(priv, 0x90) == 0xd55555d5; 2776 return ipw_read32(priv, 0x90) == 0xd55555d5;
2773} 2777}
2774 2778
2779/* timeout in msec, attempted in 10-msec quanta */
2775static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask, 2780static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
2776 int timeout) 2781 int timeout)
2777{ 2782{
@@ -2800,10 +2805,11 @@ static int ipw_stop_master(struct ipw_priv *priv)
2800 /* stop master. typical delay - 0 */ 2805 /* stop master. typical delay - 0 */
2801 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER); 2806 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
2802 2807
2808 /* timeout is in msec, polled in 10-msec quanta */
2803 rc = ipw_poll_bit(priv, IPW_RESET_REG, 2809 rc = ipw_poll_bit(priv, IPW_RESET_REG,
2804 IPW_RESET_REG_MASTER_DISABLED, 100); 2810 IPW_RESET_REG_MASTER_DISABLED, 100);
2805 if (rc < 0) { 2811 if (rc < 0) {
2806 IPW_ERROR("stop master failed in 10ms\n"); 2812 IPW_ERROR("wait for stop master failed after 100ms\n");
2807 return -1; 2813 return -1;
2808 } 2814 }
2809 2815
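The two new comments pin down the ipw_poll_bit() contract: the last argument is a timeout in milliseconds, the register is sampled in 10 ms quanta, and the return value is the elapsed time (negative on timeout), which is why the messages now read "failed after 100ms" and "device response after %dms". A simplified model of such a poll loop, with read32() standing in for the MMIO read:

#include <stdint.h>
#include <unistd.h>

extern uint32_t read32(uint32_t addr);	/* stand-in for the register read */

static int poll_bit(uint32_t addr, uint32_t mask, int timeout_ms)
{
	int elapsed = 0;

	do {
		if ((read32(addr) & mask) == mask)
			return elapsed;		/* bit(s) set: report elapsed ms */
		usleep(10 * 1000);		/* one 10 ms quantum */
		elapsed += 10;
	} while (elapsed < timeout_ms);

	return -1;				/* never set within timeout_ms */
}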
@@ -2823,33 +2829,11 @@ static void ipw_arc_release(struct ipw_priv *priv)
2823 mdelay(5); 2829 mdelay(5);
2824} 2830}
2825 2831
2826struct fw_header {
2827 u32 version;
2828 u32 mode;
2829};
2830
2831struct fw_chunk { 2832struct fw_chunk {
2832 u32 address; 2833 u32 address;
2833 u32 length; 2834 u32 length;
2834}; 2835};
2835 2836
2836#define IPW_FW_MAJOR_VERSION 2
2837#define IPW_FW_MINOR_VERSION 4
2838
2839#define IPW_FW_MINOR(x) ((x & 0xff) >> 8)
2840#define IPW_FW_MAJOR(x) (x & 0xff)
2841
2842#define IPW_FW_VERSION ((IPW_FW_MINOR_VERSION << 8) | IPW_FW_MAJOR_VERSION)
2843
2844#define IPW_FW_PREFIX "ipw-" __stringify(IPW_FW_MAJOR_VERSION) \
2845"." __stringify(IPW_FW_MINOR_VERSION) "-"
2846
2847#if IPW_FW_MAJOR_VERSION >= 2 && IPW_FW_MINOR_VERSION > 0
2848#define IPW_FW_NAME(x) IPW_FW_PREFIX "" x ".fw"
2849#else
2850#define IPW_FW_NAME(x) "ipw2200_" x ".fw"
2851#endif
2852
2853static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len) 2837static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
2854{ 2838{
2855 int rc = 0, i, addr; 2839 int rc = 0, i, addr;
@@ -2890,8 +2874,8 @@ static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
2890 mdelay(1); 2874 mdelay(1);
2891 2875
2892 /* enable ucode store */ 2876 /* enable ucode store */
2893 ipw_write_reg8(priv, DINO_CONTROL_REG, 0x0); 2877 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
2894 ipw_write_reg8(priv, DINO_CONTROL_REG, DINO_ENABLE_CS); 2878 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
2895 mdelay(1); 2879 mdelay(1);
2896 2880
2897 /* write ucode */ 2881 /* write ucode */
@@ -3036,7 +3020,7 @@ static int ipw_stop_nic(struct ipw_priv *priv)
3036 rc = ipw_poll_bit(priv, IPW_RESET_REG, 3020 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3037 IPW_RESET_REG_MASTER_DISABLED, 500); 3021 IPW_RESET_REG_MASTER_DISABLED, 500);
3038 if (rc < 0) { 3022 if (rc < 0) {
3039 IPW_ERROR("wait for reg master disabled failed\n"); 3023 IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3040 return rc; 3024 return rc;
3041 } 3025 }
3042 3026
@@ -3118,33 +3102,47 @@ static int ipw_reset_nic(struct ipw_priv *priv)
3118 return rc; 3102 return rc;
3119} 3103}
3120 3104
3105
3106struct ipw_fw {
3107 u32 ver;
3108 u32 boot_size;
3109 u32 ucode_size;
3110 u32 fw_size;
3111 u8 data[0];
3112};
3113
3121static int ipw_get_fw(struct ipw_priv *priv, 3114static int ipw_get_fw(struct ipw_priv *priv,
3122 const struct firmware **fw, const char *name) 3115 const struct firmware **raw, const char *name)
3123{ 3116{
3124 struct fw_header *header; 3117 struct ipw_fw *fw;
3125 int rc; 3118 int rc;
3126 3119
3127 /* ask firmware_class module to get the boot firmware off disk */ 3120 /* ask firmware_class module to get the boot firmware off disk */
3128 rc = request_firmware(fw, name, &priv->pci_dev->dev); 3121 rc = request_firmware(raw, name, &priv->pci_dev->dev);
3129 if (rc < 0) { 3122 if (rc < 0) {
3130 IPW_ERROR("%s load failed: Reason %d\n", name, rc); 3123 IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3131 return rc; 3124 return rc;
3132 } 3125 }
3133 3126
3134 header = (struct fw_header *)(*fw)->data; 3127 if ((*raw)->size < sizeof(*fw)) {
3135 if (IPW_FW_MAJOR(le32_to_cpu(header->version)) != IPW_FW_MAJOR_VERSION) { 3128 IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3136 IPW_ERROR("'%s' firmware version not compatible (%d != %d)\n", 3129 return -EINVAL;
3137 name, 3130 }
3138 IPW_FW_MAJOR(le32_to_cpu(header->version)), 3131
3139 IPW_FW_MAJOR_VERSION); 3132 fw = (void *)(*raw)->data;
3133
3134 if ((*raw)->size < sizeof(*fw) +
3135 fw->boot_size + fw->ucode_size + fw->fw_size) {
3136 IPW_ERROR("%s is too small or corrupt (%zd)\n",
3137 name, (*raw)->size);
3140 return -EINVAL; 3138 return -EINVAL;
3141 } 3139 }
3142 3140
3143 IPW_DEBUG_INFO("Loading firmware '%s' file v%d.%d (%zd bytes)\n", 3141 IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3144 name, 3142 name,
3145 IPW_FW_MAJOR(le32_to_cpu(header->version)), 3143 le32_to_cpu(fw->ver) >> 16,
3146 IPW_FW_MINOR(le32_to_cpu(header->version)), 3144 le32_to_cpu(fw->ver) & 0xff,
3147 (*fw)->size - sizeof(struct fw_header)); 3145 (*raw)->size - sizeof(*fw));
3148 return 0; 3146 return 0;
3149} 3147}
3150 3148
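The old per-image firmware files (boot, ucode, bss/ibss/sniffer) are replaced by one blob per mode whose header carries the three sub-image sizes; ipw_get_fw() only validates the total length, and ipw_load() later slices the blob by offset. A user-space sketch of that slicing, ignoring byte-order handling for brevity (the struct mirrors the new struct ipw_fw):

#include <stdint.h>
#include <stddef.h>

struct fw_blob {			/* mirrors struct ipw_fw */
	uint32_t ver;			/* major in the high half, minor in the low byte */
	uint32_t boot_size;
	uint32_t ucode_size;
	uint32_t fw_size;
	uint8_t  data[];		/* boot image, then ucode, then runtime fw */
};

static int split_fw(const uint8_t *raw, size_t raw_size,
		    const uint8_t **boot, const uint8_t **ucode,
		    const uint8_t **fw)
{
	const struct fw_blob *hdr = (const void *)raw;

	if (raw_size < sizeof(*hdr))
		return -1;				/* too small for the header */
	if (raw_size < sizeof(*hdr) + hdr->boot_size +
		       hdr->ucode_size + hdr->fw_size)
		return -1;				/* truncated or corrupt */

	*boot  = &hdr->data[0];
	*ucode = &hdr->data[hdr->boot_size];
	*fw    = &hdr->data[hdr->boot_size + hdr->ucode_size];
	return 0;
}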
@@ -3184,17 +3182,13 @@ static void ipw_rx_queue_reset(struct ipw_priv *priv,
3184 3182
3185#ifdef CONFIG_PM 3183#ifdef CONFIG_PM
3186static int fw_loaded = 0; 3184static int fw_loaded = 0;
3187static const struct firmware *bootfw = NULL; 3185static const struct firmware *raw = NULL;
3188static const struct firmware *firmware = NULL;
3189static const struct firmware *ucode = NULL;
3190 3186
3191static void free_firmware(void) 3187static void free_firmware(void)
3192{ 3188{
3193 if (fw_loaded) { 3189 if (fw_loaded) {
3194 release_firmware(bootfw); 3190 release_firmware(raw);
3195 release_firmware(ucode); 3191 raw = NULL;
3196 release_firmware(firmware);
3197 bootfw = ucode = firmware = NULL;
3198 fw_loaded = 0; 3192 fw_loaded = 0;
3199 } 3193 }
3200} 3194}
@@ -3205,60 +3199,50 @@ static void free_firmware(void)
3205static int ipw_load(struct ipw_priv *priv) 3199static int ipw_load(struct ipw_priv *priv)
3206{ 3200{
3207#ifndef CONFIG_PM 3201#ifndef CONFIG_PM
3208 const struct firmware *bootfw = NULL; 3202 const struct firmware *raw = NULL;
3209 const struct firmware *firmware = NULL;
3210 const struct firmware *ucode = NULL;
3211#endif 3203#endif
3204 struct ipw_fw *fw;
3205 u8 *boot_img, *ucode_img, *fw_img;
3206 u8 *name = NULL;
3212 int rc = 0, retries = 3; 3207 int rc = 0, retries = 3;
3213 3208
3214#ifdef CONFIG_PM 3209 switch (priv->ieee->iw_mode) {
3215 if (!fw_loaded) { 3210 case IW_MODE_ADHOC:
3216#endif 3211 name = "ipw2200-ibss.fw";
3217 rc = ipw_get_fw(priv, &bootfw, IPW_FW_NAME("boot")); 3212 break;
3218 if (rc)
3219 goto error;
3220
3221 switch (priv->ieee->iw_mode) {
3222 case IW_MODE_ADHOC:
3223 rc = ipw_get_fw(priv, &ucode,
3224 IPW_FW_NAME("ibss_ucode"));
3225 if (rc)
3226 goto error;
3227
3228 rc = ipw_get_fw(priv, &firmware, IPW_FW_NAME("ibss"));
3229 break;
3230
3231#ifdef CONFIG_IPW2200_MONITOR 3213#ifdef CONFIG_IPW2200_MONITOR
3232 case IW_MODE_MONITOR: 3214 case IW_MODE_MONITOR:
3233 rc = ipw_get_fw(priv, &ucode, 3215 name = "ipw2200-sniffer.fw";
3234 IPW_FW_NAME("sniffer_ucode")); 3216 break;
3235 if (rc)
3236 goto error;
3237
3238 rc = ipw_get_fw(priv, &firmware,
3239 IPW_FW_NAME("sniffer"));
3240 break;
3241#endif 3217#endif
3242 case IW_MODE_INFRA: 3218 case IW_MODE_INFRA:
3243 rc = ipw_get_fw(priv, &ucode, IPW_FW_NAME("bss_ucode")); 3219 name = "ipw2200-bss.fw";
3244 if (rc) 3220 break;
3245 goto error; 3221 }
3246
3247 rc = ipw_get_fw(priv, &firmware, IPW_FW_NAME("bss"));
3248 break;
3249 3222
3250 default: 3223 if (!name) {
3251 rc = -EINVAL; 3224 rc = -EINVAL;
3252 } 3225 goto error;
3226 }
3253 3227
3254 if (rc) 3228#ifdef CONFIG_PM
3229 if (!fw_loaded) {
3230#endif
3231 rc = ipw_get_fw(priv, &raw, name);
3232 if (rc < 0)
3255 goto error; 3233 goto error;
3256
3257#ifdef CONFIG_PM 3234#ifdef CONFIG_PM
3258 fw_loaded = 1;
3259 } 3235 }
3260#endif 3236#endif
3261 3237
3238 fw = (void *)raw->data;
3239 boot_img = &fw->data[0];
3240 ucode_img = &fw->data[fw->boot_size];
3241 fw_img = &fw->data[fw->boot_size + fw->ucode_size];
3242
3243 if (rc < 0)
3244 goto error;
3245
3262 if (!priv->rxq) 3246 if (!priv->rxq)
3263 priv->rxq = ipw_rx_queue_alloc(priv); 3247 priv->rxq = ipw_rx_queue_alloc(priv);
3264 else 3248 else
@@ -3279,7 +3263,7 @@ static int ipw_load(struct ipw_priv *priv)
3279 ipw_stop_nic(priv); 3263 ipw_stop_nic(priv);
3280 3264
3281 rc = ipw_reset_nic(priv); 3265 rc = ipw_reset_nic(priv);
3282 if (rc) { 3266 if (rc < 0) {
3283 IPW_ERROR("Unable to reset NIC\n"); 3267 IPW_ERROR("Unable to reset NIC\n");
3284 goto error; 3268 goto error;
3285 } 3269 }
@@ -3288,8 +3272,7 @@ static int ipw_load(struct ipw_priv *priv)
3288 IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND); 3272 IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3289 3273
3290 /* DMA the initial boot firmware into the device */ 3274 /* DMA the initial boot firmware into the device */
3291 rc = ipw_load_firmware(priv, bootfw->data + sizeof(struct fw_header), 3275 rc = ipw_load_firmware(priv, boot_img, fw->boot_size);
3292 bootfw->size - sizeof(struct fw_header));
3293 if (rc < 0) { 3276 if (rc < 0) {
3294 IPW_ERROR("Unable to load boot firmware: %d\n", rc); 3277 IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3295 goto error; 3278 goto error;
@@ -3298,7 +3281,7 @@ static int ipw_load(struct ipw_priv *priv)
3298 /* kick start the device */ 3281 /* kick start the device */
3299 ipw_start_nic(priv); 3282 ipw_start_nic(priv);
3300 3283
3301 /* wait for the device to finish it's initial startup sequence */ 3284 /* wait for the device to finish its initial startup sequence */
3302 rc = ipw_poll_bit(priv, IPW_INTA_RW, 3285 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3303 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500); 3286 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3304 if (rc < 0) { 3287 if (rc < 0) {
@@ -3311,8 +3294,7 @@ static int ipw_load(struct ipw_priv *priv)
3311 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE); 3294 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3312 3295
3313 /* DMA the ucode into the device */ 3296 /* DMA the ucode into the device */
3314 rc = ipw_load_ucode(priv, ucode->data + sizeof(struct fw_header), 3297 rc = ipw_load_ucode(priv, ucode_img, fw->ucode_size);
3315 ucode->size - sizeof(struct fw_header));
3316 if (rc < 0) { 3298 if (rc < 0) {
3317 IPW_ERROR("Unable to load ucode: %d\n", rc); 3299 IPW_ERROR("Unable to load ucode: %d\n", rc);
3318 goto error; 3300 goto error;
@@ -3322,18 +3304,19 @@ static int ipw_load(struct ipw_priv *priv)
3322 ipw_stop_nic(priv); 3304 ipw_stop_nic(priv);
3323 3305
3324 /* DMA bss firmware into the device */ 3306 /* DMA bss firmware into the device */
3325 rc = ipw_load_firmware(priv, firmware->data + 3307 rc = ipw_load_firmware(priv, fw_img, fw->fw_size);
3326 sizeof(struct fw_header),
3327 firmware->size - sizeof(struct fw_header));
3328 if (rc < 0) { 3308 if (rc < 0) {
3329 IPW_ERROR("Unable to load firmware: %d\n", rc); 3309 IPW_ERROR("Unable to load firmware: %d\n", rc);
3330 goto error; 3310 goto error;
3331 } 3311 }
3312#ifdef CONFIG_PM
3313 fw_loaded = 1;
3314#endif
3332 3315
3333 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0); 3316 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3334 3317
3335 rc = ipw_queue_reset(priv); 3318 rc = ipw_queue_reset(priv);
3336 if (rc) { 3319 if (rc < 0) {
3337 IPW_ERROR("Unable to initialize queues\n"); 3320 IPW_ERROR("Unable to initialize queues\n");
3338 goto error; 3321 goto error;
3339 } 3322 }
@@ -3362,7 +3345,7 @@ static int ipw_load(struct ipw_priv *priv)
3362 rc = ipw_poll_bit(priv, IPW_INTA_RW, 3345 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3363 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500); 3346 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3364 if (rc < 0) { 3347 if (rc < 0) {
3365 IPW_ERROR("device failed to start after 500ms\n"); 3348 IPW_ERROR("device failed to start within 500ms\n");
3366 goto error; 3349 goto error;
3367 } 3350 }
3368 IPW_DEBUG_INFO("device response after %dms\n", rc); 3351 IPW_DEBUG_INFO("device response after %dms\n", rc);
@@ -3386,9 +3369,7 @@ static int ipw_load(struct ipw_priv *priv)
3386 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL); 3369 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3387 3370
3388#ifndef CONFIG_PM 3371#ifndef CONFIG_PM
3389 release_firmware(bootfw); 3372 release_firmware(raw);
3390 release_firmware(ucode);
3391 release_firmware(firmware);
3392#endif 3373#endif
3393 return 0; 3374 return 0;
3394 3375
@@ -3398,15 +3379,11 @@ static int ipw_load(struct ipw_priv *priv)
3398 priv->rxq = NULL; 3379 priv->rxq = NULL;
3399 } 3380 }
3400 ipw_tx_queue_free(priv); 3381 ipw_tx_queue_free(priv);
3401 if (bootfw) 3382 if (raw)
3402 release_firmware(bootfw); 3383 release_firmware(raw);
3403 if (ucode)
3404 release_firmware(ucode);
3405 if (firmware)
3406 release_firmware(firmware);
3407#ifdef CONFIG_PM 3384#ifdef CONFIG_PM
3408 fw_loaded = 0; 3385 fw_loaded = 0;
3409 bootfw = ucode = firmware = NULL; 3386 raw = NULL;
3410#endif 3387#endif
3411 3388
3412 return rc; 3389 return rc;
@@ -3715,9 +3692,9 @@ static int ipw_disassociate(void *data)
3715static void ipw_bg_disassociate(void *data) 3692static void ipw_bg_disassociate(void *data)
3716{ 3693{
3717 struct ipw_priv *priv = data; 3694 struct ipw_priv *priv = data;
3718 down(&priv->sem); 3695 mutex_lock(&priv->mutex);
3719 ipw_disassociate(data); 3696 ipw_disassociate(data);
3720 up(&priv->sem); 3697 mutex_unlock(&priv->mutex);
3721} 3698}
3722 3699
3723static void ipw_system_config(void *data) 3700static void ipw_system_config(void *data)
@@ -4077,9 +4054,9 @@ static void ipw_gather_stats(struct ipw_priv *priv)
4077static void ipw_bg_gather_stats(void *data) 4054static void ipw_bg_gather_stats(void *data)
4078{ 4055{
4079 struct ipw_priv *priv = data; 4056 struct ipw_priv *priv = data;
4080 down(&priv->sem); 4057 mutex_lock(&priv->mutex);
4081 ipw_gather_stats(data); 4058 ipw_gather_stats(data);
4082 up(&priv->sem); 4059 mutex_unlock(&priv->mutex);
4083} 4060}
4084 4061
4085/* Missed beacon behavior: 4062/* Missed beacon behavior:
@@ -4121,8 +4098,9 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4121 return; 4098 return;
4122 } 4099 }
4123 4100
4124 if (missed_count > priv->roaming_threshold && 4101 if (roaming &&
4125 missed_count <= priv->disassociate_threshold) { 4102 (missed_count > priv->roaming_threshold &&
4103 missed_count <= priv->disassociate_threshold)) {
4126 /* If we are not already roaming, set the ROAM 4104 /* If we are not already roaming, set the ROAM
4127 * bit in the status and kick off a scan. 4105 * bit in the status and kick off a scan.
4128 * This can happen several times before we reach 4106 * This can happen several times before we reach
@@ -4150,7 +4128,6 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4150 } 4128 }
4151 4129
4152 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count); 4130 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
4153
4154} 4131}
4155 4132
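The added "roaming &&" guard means the band between the two thresholds only triggers a roam scan when roaming is enabled; above the disassociation threshold the link is dropped regardless (that branch sits outside this hunk). A sketch of the intended decision, under that assumption:

enum beacon_action { BEACON_IGNORE, BEACON_ROAM, BEACON_DISASSOCIATE };

static enum beacon_action missed_beacon_action(int missed, int roaming_enabled,
					       int roam_thresh, int disassoc_thresh)
{
	if (missed > disassoc_thresh)
		return BEACON_DISASSOCIATE;
	if (roaming_enabled && missed > roam_thresh)
		return BEACON_ROAM;
	return BEACON_IGNORE;			/* just log the missed count */
}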
4156/** 4133/**
@@ -4527,10 +4504,9 @@ static void ipw_rx_notification(struct ipw_priv *priv,
4527 4504
4528 if (notif->size == sizeof(*x)) { 4505 if (notif->size == sizeof(*x)) {
4529 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE, 4506 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4530 "link deterioration: '%s' " MAC_FMT 4507 "link deterioration: type %d, cnt %d\n",
4531 " \n", escape_essid(priv->essid, 4508 x->silence_notification_type,
4532 priv->essid_len), 4509 x->silence_count);
4533 MAC_ARG(priv->bssid));
4534 memcpy(&priv->last_link_deterioration, x, 4510 memcpy(&priv->last_link_deterioration, x,
4535 sizeof(*x)); 4511 sizeof(*x));
4536 } else { 4512 } else {
@@ -4911,13 +4887,13 @@ static void ipw_rx_queue_replenish(void *data)
4911static void ipw_bg_rx_queue_replenish(void *data) 4887static void ipw_bg_rx_queue_replenish(void *data)
4912{ 4888{
4913 struct ipw_priv *priv = data; 4889 struct ipw_priv *priv = data;
4914 down(&priv->sem); 4890 mutex_lock(&priv->mutex);
4915 ipw_rx_queue_replenish(data); 4891 ipw_rx_queue_replenish(data);
4916 up(&priv->sem); 4892 mutex_unlock(&priv->mutex);
4917} 4893}
4918 4894
4919/* Assumes that the skb field of the buffers in 'pool' is kept accurate. 4895/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
4920 * If an SKB has been detached, the POOL needs to have it's SKB set to NULL 4896 * If an SKB has been detached, the POOL needs to have its SKB set to NULL
4921 * This free routine walks the list of POOL entries and if SKB is set to 4897 * This free routine walks the list of POOL entries and if SKB is set to
4922 * non NULL it is unmapped and freed 4898 * non NULL it is unmapped and freed
4923 */ 4899 */
@@ -5257,10 +5233,11 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv,
5257 if (priv->ieee->scan_age != 0 && 5233 if (priv->ieee->scan_age != 0 &&
5258 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) { 5234 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5259 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded " 5235 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5260 "because of age: %lums.\n", 5236 "because of age: %ums.\n",
5261 escape_essid(network->ssid, network->ssid_len), 5237 escape_essid(network->ssid, network->ssid_len),
5262 MAC_ARG(network->bssid), 5238 MAC_ARG(network->bssid),
5263 1000 * (jiffies - network->last_scanned) / HZ); 5239 jiffies_to_msecs(jiffies -
5240 network->last_scanned));
5264 return 0; 5241 return 0;
5265 } 5242 }
5266 5243
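The age filter itself is unchanged here; only the log line now uses jiffies_to_msecs() (ms = jiffies * 1000 / HZ) and the matching %u format instead of open-coded HZ arithmetic. The check amounts to:

/* Skip a network last seen more than scan_age jiffies ago (0 disables aging);
 * the subtraction wraps correctly, in the spirit of the kernel's time_after(). */
static int network_too_old(unsigned long now, unsigned long last_scanned,
			   unsigned long scan_age)
{
	return scan_age != 0 && (long)(now - (last_scanned + scan_age)) > 0;
}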
@@ -5369,7 +5346,7 @@ static void ipw_merge_adhoc_network(void *data)
5369 return; 5346 return;
5370 } 5347 }
5371 5348
5372 down(&priv->sem); 5349 mutex_lock(&priv->mutex);
5373 if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) { 5350 if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5374 IPW_DEBUG_MERGE("remove network %s\n", 5351 IPW_DEBUG_MERGE("remove network %s\n",
5375 escape_essid(priv->essid, 5352 escape_essid(priv->essid,
@@ -5379,7 +5356,7 @@ static void ipw_merge_adhoc_network(void *data)
5379 5356
5380 ipw_disassociate(priv); 5357 ipw_disassociate(priv);
5381 priv->assoc_network = match.network; 5358 priv->assoc_network = match.network;
5382 up(&priv->sem); 5359 mutex_unlock(&priv->mutex);
5383 return; 5360 return;
5384 } 5361 }
5385} 5362}
@@ -5467,11 +5444,12 @@ static int ipw_best_network(struct ipw_priv *priv,
5467 if (network->last_associate && 5444 if (network->last_associate &&
5468 time_after(network->last_associate + (HZ * 3UL), jiffies)) { 5445 time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5469 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " 5446 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5470 "because of storming (%lus since last " 5447 "because of storming (%ums since last "
5471 "assoc attempt).\n", 5448 "assoc attempt).\n",
5472 escape_essid(network->ssid, network->ssid_len), 5449 escape_essid(network->ssid, network->ssid_len),
5473 MAC_ARG(network->bssid), 5450 MAC_ARG(network->bssid),
5474 (jiffies - network->last_associate) / HZ); 5451 jiffies_to_msecs(jiffies -
5452 network->last_associate));
5475 return 0; 5453 return 0;
5476 } 5454 }
5477 5455
@@ -5479,10 +5457,11 @@ static int ipw_best_network(struct ipw_priv *priv,
5479 if (priv->ieee->scan_age != 0 && 5457 if (priv->ieee->scan_age != 0 &&
5480 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) { 5458 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5481 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " 5459 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5482 "because of age: %lums.\n", 5460 "because of age: %ums.\n",
5483 escape_essid(network->ssid, network->ssid_len), 5461 escape_essid(network->ssid, network->ssid_len),
5484 MAC_ARG(network->bssid), 5462 MAC_ARG(network->bssid),
5485 1000 * (jiffies - network->last_scanned) / HZ); 5463 jiffies_to_msecs(jiffies -
5464 network->last_scanned));
5486 return 0; 5465 return 0;
5487 } 5466 }
5488 5467
@@ -5510,15 +5489,6 @@ static int ipw_best_network(struct ipw_priv *priv,
5510 return 0; 5489 return 0;
5511 } 5490 }
5512 5491
5513 if (!priv->ieee->wpa_enabled && (network->wpa_ie_len > 0 ||
5514 network->rsn_ie_len > 0)) {
5515 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5516 "because of WPA capability mismatch.\n",
5517 escape_essid(network->ssid, network->ssid_len),
5518 MAC_ARG(network->bssid));
5519 return 0;
5520 }
5521
5522 if ((priv->config & CFG_STATIC_BSSID) && 5492 if ((priv->config & CFG_STATIC_BSSID) &&
5523 memcmp(network->bssid, priv->bssid, ETH_ALEN)) { 5493 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5524 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " 5494 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
@@ -5539,7 +5509,7 @@ static int ipw_best_network(struct ipw_priv *priv,
5539 } 5509 }
5540 5510
5541 /* Filter out invalid channel in current GEO */ 5511 /* Filter out invalid channel in current GEO */
5542 if (!ipw_is_valid_channel(priv->ieee, network->channel)) { 5512 if (!ieee80211_is_valid_channel(priv->ieee, network->channel)) {
5543 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " 5513 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5544 "because of invalid channel in current GEO\n", 5514 "because of invalid channel in current GEO\n",
5545 escape_essid(network->ssid, network->ssid_len), 5515 escape_essid(network->ssid, network->ssid_len),
@@ -5584,7 +5554,7 @@ static int ipw_best_network(struct ipw_priv *priv,
5584static void ipw_adhoc_create(struct ipw_priv *priv, 5554static void ipw_adhoc_create(struct ipw_priv *priv,
5585 struct ieee80211_network *network) 5555 struct ieee80211_network *network)
5586{ 5556{
5587 const struct ieee80211_geo *geo = ipw_get_geo(priv->ieee); 5557 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
5588 int i; 5558 int i;
5589 5559
5590 /* 5560 /*
@@ -5599,10 +5569,10 @@ static void ipw_adhoc_create(struct ipw_priv *priv,
5599 * FW fatal error. 5569 * FW fatal error.
5600 * 5570 *
5601 */ 5571 */
5602 switch (ipw_is_valid_channel(priv->ieee, priv->channel)) { 5572 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
5603 case IEEE80211_52GHZ_BAND: 5573 case IEEE80211_52GHZ_BAND:
5604 network->mode = IEEE_A; 5574 network->mode = IEEE_A;
5605 i = ipw_channel_to_index(priv->ieee, priv->channel); 5575 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5606 if (i == -1) 5576 if (i == -1)
5607 BUG(); 5577 BUG();
5608 if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) { 5578 if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
@@ -5616,7 +5586,7 @@ static void ipw_adhoc_create(struct ipw_priv *priv,
5616 network->mode = IEEE_G; 5586 network->mode = IEEE_G;
5617 else 5587 else
5618 network->mode = IEEE_B; 5588 network->mode = IEEE_B;
5619 i = ipw_channel_to_index(priv->ieee, priv->channel); 5589 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5620 if (i == -1) 5590 if (i == -1)
5621 BUG(); 5591 BUG();
5622 if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) { 5592 if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
@@ -5671,54 +5641,44 @@ static void ipw_adhoc_create(struct ipw_priv *priv,
 
 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
 {
-        struct ipw_tgi_tx_key *key;
-        struct host_cmd cmd = {
-                .cmd = IPW_CMD_TGI_TX_KEY,
-                .len = sizeof(*key)
-        };
+        struct ipw_tgi_tx_key key;
 
         if (!(priv->ieee->sec.flags & (1 << index)))
                 return;
 
-        key = (struct ipw_tgi_tx_key *)&cmd.param;
-        key->key_id = index;
-        memcpy(key->key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
-        key->security_type = type;
-        key->station_index = 0;        /* always 0 for BSS */
-        key->flags = 0;
+        key.key_id = index;
+        memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
+        key.security_type = type;
+        key.station_index = 0;        /* always 0 for BSS */
+        key.flags = 0;
         /* 0 for new key; previous value of counter (after fatal error) */
-        key->tx_counter[0] = 0;
-        key->tx_counter[1] = 0;
+        key.tx_counter[0] = 0;
+        key.tx_counter[1] = 0;
 
-        ipw_send_cmd(priv, &cmd);
+        ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
 }
 
 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
 {
-        struct ipw_wep_key *key;
+        struct ipw_wep_key key;
         int i;
-        struct host_cmd cmd = {
-                .cmd = IPW_CMD_WEP_KEY,
-                .len = sizeof(*key)
-        };
 
-        key = (struct ipw_wep_key *)&cmd.param;
-        key->cmd_id = DINO_CMD_WEP_KEY;
-        key->seq_num = 0;
+        key.cmd_id = DINO_CMD_WEP_KEY;
+        key.seq_num = 0;
 
         /* Note: AES keys cannot be set for multiple times.
          * Only set it at the first time. */
         for (i = 0; i < 4; i++) {
-                key->key_index = i | type;
+                key.key_index = i | type;
                 if (!(priv->ieee->sec.flags & (1 << i))) {
-                        key->key_size = 0;
+                        key.key_size = 0;
                         continue;
                 }
 
-                key->key_size = priv->ieee->sec.key_sizes[i];
-                memcpy(key->key, priv->ieee->sec.keys[i], key->key_size);
+                key.key_size = priv->ieee->sec.key_sizes[i];
+                memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
 
-                ipw_send_cmd(priv, &cmd);
+                ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
         }
 }
 
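
The two senders above now hand a bare payload to ipw_send_cmd_pdu() instead of
building a struct host_cmd by hand.  The helper itself is defined outside this
excerpt; the sketch below only illustrates the wrapper shape implied by the
call sites (the name ipw_send_cmd_pdu_sketch and the reuse of cmd.param are
assumptions, not the patch's actual code):

        /* Hedged sketch of a pdu-style wrapper: marshal a caller-supplied
         * payload into a host_cmd and reuse the existing ipw_send_cmd(). */
        static int ipw_send_cmd_pdu_sketch(struct ipw_priv *priv, u8 command,
                                           u8 len, const void *data)
        {
                struct host_cmd cmd = {
                        .cmd = command,
                        .len = len,
                };

                memcpy(cmd.param, data, len);   /* payload travels in cmd.param */
                return ipw_send_cmd(priv, &cmd);
        }
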
@@ -5822,9 +5782,9 @@ static void ipw_adhoc_check(void *data)
5822static void ipw_bg_adhoc_check(void *data) 5782static void ipw_bg_adhoc_check(void *data)
5823{ 5783{
5824 struct ipw_priv *priv = data; 5784 struct ipw_priv *priv = data;
5825 down(&priv->sem); 5785 mutex_lock(&priv->mutex);
5826 ipw_adhoc_check(data); 5786 ipw_adhoc_check(data);
5827 up(&priv->sem); 5787 mutex_unlock(&priv->mutex);
5828} 5788}
5829 5789
5830#ifdef CONFIG_IPW2200_DEBUG 5790#ifdef CONFIG_IPW2200_DEBUG
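
Every background worker touched by this patch follows the same locking
conversion: the priv->sem semaphore that was used as a sleeping lock becomes a
struct mutex.  A minimal sketch of the pattern, assuming the mutex is
initialised with mutex_init() at probe time (that part is outside this
excerpt); the struct fragment is only indicative:

        struct ipw_priv {
                /* ... */
                struct mutex mutex;             /* was: struct semaphore sem; */
        };

        static void ipw_bg_work_sketch(void *data)
        {
                struct ipw_priv *priv = data;

                mutex_lock(&priv->mutex);       /* was: down(&priv->sem); */
                /* touch driver state while holding the lock */
                mutex_unlock(&priv->mutex);     /* was: up(&priv->sem);   */
        }
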
@@ -5950,7 +5910,7 @@ static void ipw_add_scan_channels(struct ipw_priv *priv,
5950 const struct ieee80211_geo *geo; 5910 const struct ieee80211_geo *geo;
5951 int i; 5911 int i;
5952 5912
5953 geo = ipw_get_geo(priv->ieee); 5913 geo = ieee80211_get_geo(priv->ieee);
5954 5914
5955 if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) { 5915 if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) {
5956 int start = channel_index; 5916 int start = channel_index;
@@ -6010,7 +5970,7 @@ static void ipw_add_scan_channels(struct ipw_priv *priv,
6010 channel_index++; 5970 channel_index++;
6011 scan->channels_list[channel_index] = channel; 5971 scan->channels_list[channel_index] = channel;
6012 index = 5972 index =
6013 ipw_channel_to_index(priv->ieee, channel); 5973 ieee80211_channel_to_index(priv->ieee, channel);
6014 ipw_set_scan_type(scan, channel_index, 5974 ipw_set_scan_type(scan, channel_index,
6015 geo->bg[index]. 5975 geo->bg[index].
6016 flags & 5976 flags &
@@ -6051,7 +6011,7 @@ static int ipw_request_scan(struct ipw_priv *priv)
6051 (priv->status & STATUS_EXIT_PENDING)) 6011 (priv->status & STATUS_EXIT_PENDING))
6052 return 0; 6012 return 0;
6053 6013
6054 down(&priv->sem); 6014 mutex_lock(&priv->mutex);
6055 6015
6056 if (priv->status & STATUS_SCANNING) { 6016 if (priv->status & STATUS_SCANNING) {
6057 IPW_DEBUG_HC("Concurrent scan requested. Ignoring.\n"); 6017 IPW_DEBUG_HC("Concurrent scan requested. Ignoring.\n");
@@ -6092,7 +6052,7 @@ static int ipw_request_scan(struct ipw_priv *priv)
6092 u8 channel; 6052 u8 channel;
6093 u8 band = 0; 6053 u8 band = 0;
6094 6054
6095 switch (ipw_is_valid_channel(priv->ieee, priv->channel)) { 6055 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
6096 case IEEE80211_52GHZ_BAND: 6056 case IEEE80211_52GHZ_BAND:
6097 band = (u8) (IPW_A_MODE << 6) | 1; 6057 band = (u8) (IPW_A_MODE << 6) | 1;
6098 channel = priv->channel; 6058 channel = priv->channel;
@@ -6159,16 +6119,16 @@ static int ipw_request_scan(struct ipw_priv *priv)
6159 queue_delayed_work(priv->workqueue, &priv->scan_check, 6119 queue_delayed_work(priv->workqueue, &priv->scan_check,
6160 IPW_SCAN_CHECK_WATCHDOG); 6120 IPW_SCAN_CHECK_WATCHDOG);
6161 done: 6121 done:
6162 up(&priv->sem); 6122 mutex_unlock(&priv->mutex);
6163 return err; 6123 return err;
6164} 6124}
6165 6125
6166static void ipw_bg_abort_scan(void *data) 6126static void ipw_bg_abort_scan(void *data)
6167{ 6127{
6168 struct ipw_priv *priv = data; 6128 struct ipw_priv *priv = data;
6169 down(&priv->sem); 6129 mutex_lock(&priv->mutex);
6170 ipw_abort_scan(data); 6130 ipw_abort_scan(data);
6171 up(&priv->sem); 6131 mutex_unlock(&priv->mutex);
6172} 6132}
6173 6133
6174static int ipw_wpa_enable(struct ipw_priv *priv, int value) 6134static int ipw_wpa_enable(struct ipw_priv *priv, int value)
@@ -6193,6 +6153,9 @@ static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6193 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) { 6153 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6194 sec.auth_mode = WLAN_AUTH_OPEN; 6154 sec.auth_mode = WLAN_AUTH_OPEN;
6195 ieee->open_wep = 1; 6155 ieee->open_wep = 1;
6156 } else if (value & IW_AUTH_ALG_LEAP) {
6157 sec.auth_mode = WLAN_AUTH_LEAP;
6158 ieee->open_wep = 1;
6196 } else 6159 } else
6197 return -EINVAL; 6160 return -EINVAL;
6198 6161
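
With the new branch, the WEXT authentication-algorithm bits map onto three
ieee80211 auth modes, and LEAP (like open system) keeps ieee->open_wep set.
A compressed sketch of that mapping; the helper name is hypothetical, and the
shared-key branch sits above the visible context, so its placement here is an
assumption:

        static int ipw_auth_alg_to_mode_sketch(int value)
        {
                if (value & IW_AUTH_ALG_SHARED_KEY)
                        return WLAN_AUTH_SHARED_KEY;
                if (value & IW_AUTH_ALG_OPEN_SYSTEM)
                        return WLAN_AUTH_OPEN;
                if (value & IW_AUTH_ALG_LEAP)           /* new in this patch */
                        return WLAN_AUTH_LEAP;
                return -EINVAL;
        }
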
@@ -6204,7 +6167,8 @@ static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6204 return ret; 6167 return ret;
6205} 6168}
6206 6169
6207void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie, int wpa_ie_len) 6170static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6171 int wpa_ie_len)
6208{ 6172{
6209 /* make sure WPA is enabled */ 6173 /* make sure WPA is enabled */
6210 ipw_wpa_enable(priv, 1); 6174 ipw_wpa_enable(priv, 1);
@@ -6215,15 +6179,10 @@ void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie, int wpa_ie_len)
 static int ipw_set_rsn_capa(struct ipw_priv *priv,
                             char *capabilities, int length)
 {
-        struct host_cmd cmd = {
-                .cmd = IPW_CMD_RSN_CAPABILITIES,
-                .len = length,
-        };
-
         IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
 
-        memcpy(cmd.param, capabilities, length);
-        return ipw_send_cmd(priv, &cmd);
+        return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
+                                capabilities);
 }
 
 /*
@@ -6244,7 +6203,7 @@ static int ipw_wx_set_genie(struct net_device *dev,
6244 (wrqu->data.length && extra == NULL)) 6203 (wrqu->data.length && extra == NULL))
6245 return -EINVAL; 6204 return -EINVAL;
6246 6205
6247 //down(&priv->sem); 6206 //mutex_lock(&priv->mutex);
6248 6207
6249 //if (!ieee->wpa_enabled) { 6208 //if (!ieee->wpa_enabled) {
6250 // err = -EOPNOTSUPP; 6209 // err = -EOPNOTSUPP;
@@ -6270,7 +6229,7 @@ static int ipw_wx_set_genie(struct net_device *dev,
6270 6229
6271 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len); 6230 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6272 out: 6231 out:
6273 //up(&priv->sem); 6232 //mutex_unlock(&priv->mutex);
6274 return err; 6233 return err;
6275} 6234}
6276 6235
@@ -6283,7 +6242,7 @@ static int ipw_wx_get_genie(struct net_device *dev,
6283 struct ieee80211_device *ieee = priv->ieee; 6242 struct ieee80211_device *ieee = priv->ieee;
6284 int err = 0; 6243 int err = 0;
6285 6244
6286 //down(&priv->sem); 6245 //mutex_lock(&priv->mutex);
6287 6246
6288 //if (!ieee->wpa_enabled) { 6247 //if (!ieee->wpa_enabled) {
6289 // err = -EOPNOTSUPP; 6248 // err = -EOPNOTSUPP;
@@ -6304,7 +6263,7 @@ static int ipw_wx_get_genie(struct net_device *dev,
6304 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len); 6263 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6305 6264
6306 out: 6265 out:
6307 //up(&priv->sem); 6266 //mutex_unlock(&priv->mutex);
6308 return err; 6267 return err;
6309} 6268}
6310 6269
@@ -6556,7 +6515,7 @@ static int ipw_wx_set_mlme(struct net_device *dev,
6556* get the modulation type of the current network or 6515* get the modulation type of the current network or
6557* the card current mode 6516* the card current mode
6558*/ 6517*/
6559u8 ipw_qos_current_mode(struct ipw_priv * priv) 6518static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6560{ 6519{
6561 u8 mode = 0; 6520 u8 mode = 0;
6562 6521
@@ -6964,12 +6923,12 @@ static void ipw_bg_qos_activate(void *data)
6964 if (priv == NULL) 6923 if (priv == NULL)
6965 return; 6924 return;
6966 6925
6967 down(&priv->sem); 6926 mutex_lock(&priv->mutex);
6968 6927
6969 if (priv->status & STATUS_ASSOCIATED) 6928 if (priv->status & STATUS_ASSOCIATED)
6970 ipw_qos_activate(priv, &(priv->assoc_network->qos_data)); 6929 ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
6971 6930
6972 up(&priv->sem); 6931 mutex_unlock(&priv->mutex);
6973} 6932}
6974 6933
6975static int ipw_handle_probe_response(struct net_device *dev, 6934static int ipw_handle_probe_response(struct net_device *dev,
@@ -7010,25 +6969,15 @@ static int ipw_handle_assoc_response(struct net_device *dev,
 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
                                        *qos_param)
 {
-        struct host_cmd cmd = {
-                .cmd = IPW_CMD_QOS_PARAMETERS,
-                .len = (sizeof(struct ieee80211_qos_parameters) * 3)
-        };
-
-        memcpy(cmd.param, qos_param, sizeof(*qos_param) * 3);
-        return ipw_send_cmd(priv, &cmd);
+        return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
+                                sizeof(*qos_param) * 3, qos_param);
 }
 
 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
                                      *qos_param)
 {
-        struct host_cmd cmd = {
-                .cmd = IPW_CMD_WME_INFO,
-                .len = sizeof(*qos_param)
-        };
-
-        memcpy(cmd.param, qos_param, sizeof(*qos_param));
-        return ipw_send_cmd(priv, &cmd);
+        return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
+                                qos_param);
 }
 
 #endif                          /* CONFIG_IPW_QOS */
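
ipw_send_qos_params_command() still pushes three parameter sets in a single
command (sizeof(*qos_param) * 3), so callers are expected to pass the first
element of a three-entry array.  A hedged usage sketch; the array name and the
CCK/OFDM/current interpretation are assumptions:

        struct ieee80211_qos_parameters qos_set[3];     /* e.g. CCK, OFDM, current */

        memset(qos_set, 0, sizeof(qos_set));
        /* ... fill all three entries ... */
        ipw_send_qos_params_command(priv, &qos_set[0]); /* sends all 3 blocks */
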
@@ -7052,19 +7001,21 @@ static int ipw_associate_network(struct ipw_priv *priv,
 
         memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
         priv->assoc_request.channel = network->channel;
+        priv->assoc_request.auth_key = 0;
+
         if ((priv->capability & CAP_PRIVACY_ON) &&
-            (priv->capability & CAP_SHARED_KEY)) {
+            (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
                 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
                 priv->assoc_request.auth_key = priv->ieee->sec.active_key;
 
-                if ((priv->capability & CAP_PRIVACY_ON) &&
-                    (priv->ieee->sec.level == SEC_LEVEL_1) &&
-                    !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
+                if (priv->ieee->sec.level == SEC_LEVEL_1)
                         ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
-        } else {
+
+        } else if ((priv->capability & CAP_PRIVACY_ON) &&
+                   (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
+                priv->assoc_request.auth_type = AUTH_LEAP;
+        else
                 priv->assoc_request.auth_type = AUTH_OPEN;
-                priv->assoc_request.auth_key = 0;
-        }
 
         if (priv->ieee->wpa_ie_len) {
                 priv->assoc_request.policy_support = 0x02;      /* RSN active */
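
The authentication choice is now keyed off priv->ieee->sec.auth_mode rather
than the CAP_SHARED_KEY capability bit, and it gains a LEAP case.  Condensed
into one helper purely for illustration (the function name is hypothetical;
the constants are the ones used in the hunk above):

        static u8 ipw_pick_auth_type_sketch(struct ipw_priv *priv)
        {
                if ((priv->capability & CAP_PRIVACY_ON) &&
                    priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)
                        return AUTH_SHARED_KEY;
                if ((priv->capability & CAP_PRIVACY_ON) &&
                    priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP)
                        return AUTH_LEAP;
                return AUTH_OPEN;
        }
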
@@ -7278,9 +7229,9 @@ static void ipw_roam(void *data)
7278static void ipw_bg_roam(void *data) 7229static void ipw_bg_roam(void *data)
7279{ 7230{
7280 struct ipw_priv *priv = data; 7231 struct ipw_priv *priv = data;
7281 down(&priv->sem); 7232 mutex_lock(&priv->mutex);
7282 ipw_roam(data); 7233 ipw_roam(data);
7283 up(&priv->sem); 7234 mutex_unlock(&priv->mutex);
7284} 7235}
7285 7236
7286static int ipw_associate(void *data) 7237static int ipw_associate(void *data)
@@ -7375,9 +7326,9 @@ static int ipw_associate(void *data)
7375static void ipw_bg_associate(void *data) 7326static void ipw_bg_associate(void *data)
7376{ 7327{
7377 struct ipw_priv *priv = data; 7328 struct ipw_priv *priv = data;
7378 down(&priv->sem); 7329 mutex_lock(&priv->mutex);
7379 ipw_associate(data); 7330 ipw_associate(data);
7380 up(&priv->sem); 7331 mutex_unlock(&priv->mutex);
7381} 7332}
7382 7333
7383static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv, 7334static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
@@ -7811,12 +7762,10 @@ static void ipw_rx(struct ipw_priv *priv)
7811 7762
7812 while (i != r) { 7763 while (i != r) {
7813 rxb = priv->rxq->queue[i]; 7764 rxb = priv->rxq->queue[i];
7814#ifdef CONFIG_IPW2200_DEBUG
7815 if (unlikely(rxb == NULL)) { 7765 if (unlikely(rxb == NULL)) {
7816 printk(KERN_CRIT "Queue not allocated!\n"); 7766 printk(KERN_CRIT "Queue not allocated!\n");
7817 break; 7767 break;
7818 } 7768 }
7819#endif
7820 priv->rxq->queue[i] = NULL; 7769 priv->rxq->queue[i] = NULL;
7821 7770
7822 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr, 7771 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
@@ -7835,7 +7784,8 @@ static void ipw_rx(struct ipw_priv *priv)
7835 le16_to_cpu(pkt->u.frame.rssi_dbm) - 7784 le16_to_cpu(pkt->u.frame.rssi_dbm) -
7836 IPW_RSSI_TO_DBM, 7785 IPW_RSSI_TO_DBM,
7837 .signal = 7786 .signal =
7838 le16_to_cpu(pkt->u.frame.signal), 7787 le16_to_cpu(pkt->u.frame.rssi_dbm) -
7788 IPW_RSSI_TO_DBM + 0x100,
7839 .noise = 7789 .noise =
7840 le16_to_cpu(pkt->u.frame.noise), 7790 le16_to_cpu(pkt->u.frame.noise),
7841 .rate = pkt->u.frame.rate, 7791 .rate = pkt->u.frame.rate,
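
The RX path now reports .signal the same way as .level: the firmware's
rssi_dbm reading minus IPW_RSSI_TO_DBM, offset by 0x100 so the negative dBm
figure fits the unsigned wireless-stats field.  Combined with the IW_QUAL_DBM
flag set in ipw_get_wireless_stats() later in this diff, userspace can undo
the bias; the >= 64 heuristic below mirrors what iwlib-style tools commonly
do and should be read as an assumption, not part of this patch:

        int level = wstats.qual.level;          /* u8 as reported by the driver */

        if ((wstats.qual.updated & IW_QUAL_DBM) && level >= 64)
                level -= 0x100;                 /* back to a negative dBm value */
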
@@ -7899,7 +7849,8 @@ static void ipw_rx(struct ipw_priv *priv)
7899 le16_to_cpu(pkt->u.frame.length)); 7849 le16_to_cpu(pkt->u.frame.length));
7900 7850
7901 if (le16_to_cpu(pkt->u.frame.length) < 7851 if (le16_to_cpu(pkt->u.frame.length) <
7902 frame_hdr_len(header)) { 7852 ieee80211_get_hdrlen(le16_to_cpu(
7853 header->frame_ctl))) {
7903 IPW_DEBUG_DROP 7854 IPW_DEBUG_DROP
7904 ("Received packet is too small. " 7855 ("Received packet is too small. "
7905 "Dropping.\n"); 7856 "Dropping.\n");
@@ -7989,7 +7940,14 @@ static void ipw_rx(struct ipw_priv *priv)
7989#define DEFAULT_SHORT_RETRY_LIMIT 7U 7940#define DEFAULT_SHORT_RETRY_LIMIT 7U
7990#define DEFAULT_LONG_RETRY_LIMIT 4U 7941#define DEFAULT_LONG_RETRY_LIMIT 4U
7991 7942
7992static int ipw_sw_reset(struct ipw_priv *priv, int init) 7943/**
7944 * ipw_sw_reset
7945 * @option: options to control different reset behaviour
7946 * 0 = reset everything except the 'disable' module_param
7947 * 1 = reset everything and print out driver info (for probe only)
7948 * 2 = reset everything
7949 */
7950static int ipw_sw_reset(struct ipw_priv *priv, int option)
7993{ 7951{
7994 int band, modulation; 7952 int band, modulation;
7995 int old_mode = priv->ieee->iw_mode; 7953 int old_mode = priv->ieee->iw_mode;
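
The new kerneldoc pins down the option argument.  The call sites visible later
in this diff use 0 (ipw_wx_set_mode) and 2 (ipw_wx_sw_reset); option 1 is
documented as probe-only, and the probe call site itself lies outside this
excerpt, so that attribution is an assumption:

        ipw_sw_reset(priv, 0);  /* wx set_mode: keep the 'disable' module param */
        ipw_sw_reset(priv, 1);  /* probe (assumed): full reset plus adapter banner */
        ipw_sw_reset(priv, 2);  /* wx sw_reset: full reset, no banner */
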
@@ -8016,7 +7974,7 @@ static int ipw_sw_reset(struct ipw_priv *priv, int init)
8016 priv->essid_len = 0; 7974 priv->essid_len = 0;
8017 memset(priv->essid, 0, IW_ESSID_MAX_SIZE); 7975 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
8018 7976
8019 if (disable) { 7977 if (disable && option) {
8020 priv->status |= STATUS_RF_KILL_SW; 7978 priv->status |= STATUS_RF_KILL_SW;
8021 IPW_DEBUG_INFO("Radio disabled.\n"); 7979 IPW_DEBUG_INFO("Radio disabled.\n");
8022 } 7980 }
@@ -8068,7 +8026,7 @@ static int ipw_sw_reset(struct ipw_priv *priv, int init)
8068 8026
8069 if ((priv->pci_dev->device == 0x4223) || 8027 if ((priv->pci_dev->device == 0x4223) ||
8070 (priv->pci_dev->device == 0x4224)) { 8028 (priv->pci_dev->device == 0x4224)) {
8071 if (init) 8029 if (option == 1)
8072 printk(KERN_INFO DRV_NAME 8030 printk(KERN_INFO DRV_NAME
8073 ": Detected Intel PRO/Wireless 2915ABG Network " 8031 ": Detected Intel PRO/Wireless 2915ABG Network "
8074 "Connection\n"); 8032 "Connection\n");
@@ -8079,7 +8037,7 @@ static int ipw_sw_reset(struct ipw_priv *priv, int init)
8079 priv->adapter = IPW_2915ABG; 8037 priv->adapter = IPW_2915ABG;
8080 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B; 8038 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8081 } else { 8039 } else {
8082 if (init) 8040 if (option == 1)
8083 printk(KERN_INFO DRV_NAME 8041 printk(KERN_INFO DRV_NAME
8084 ": Detected Intel PRO/Wireless 2200BG Network " 8042 ": Detected Intel PRO/Wireless 2200BG Network "
8085 "Connection\n"); 8043 "Connection\n");
@@ -8126,7 +8084,7 @@ static int ipw_wx_get_name(struct net_device *dev,
8126 union iwreq_data *wrqu, char *extra) 8084 union iwreq_data *wrqu, char *extra)
8127{ 8085{
8128 struct ipw_priv *priv = ieee80211_priv(dev); 8086 struct ipw_priv *priv = ieee80211_priv(dev);
8129 down(&priv->sem); 8087 mutex_lock(&priv->mutex);
8130 if (priv->status & STATUS_RF_KILL_MASK) 8088 if (priv->status & STATUS_RF_KILL_MASK)
8131 strcpy(wrqu->name, "radio off"); 8089 strcpy(wrqu->name, "radio off");
8132 else if (!(priv->status & STATUS_ASSOCIATED)) 8090 else if (!(priv->status & STATUS_ASSOCIATED))
@@ -8135,7 +8093,7 @@ static int ipw_wx_get_name(struct net_device *dev,
8135 snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c", 8093 snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
8136 ipw_modes[priv->assoc_request.ieee_mode]); 8094 ipw_modes[priv->assoc_request.ieee_mode]);
8137 IPW_DEBUG_WX("Name: %s\n", wrqu->name); 8095 IPW_DEBUG_WX("Name: %s\n", wrqu->name);
8138 up(&priv->sem); 8096 mutex_unlock(&priv->mutex);
8139 return 0; 8097 return 0;
8140} 8098}
8141 8099
@@ -8196,7 +8154,7 @@ static int ipw_wx_set_freq(struct net_device *dev,
8196 union iwreq_data *wrqu, char *extra) 8154 union iwreq_data *wrqu, char *extra)
8197{ 8155{
8198 struct ipw_priv *priv = ieee80211_priv(dev); 8156 struct ipw_priv *priv = ieee80211_priv(dev);
8199 const struct ieee80211_geo *geo = ipw_get_geo(priv->ieee); 8157 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
8200 struct iw_freq *fwrq = &wrqu->freq; 8158 struct iw_freq *fwrq = &wrqu->freq;
8201 int ret = 0, i; 8159 int ret = 0, i;
8202 u8 channel, flags; 8160 u8 channel, flags;
@@ -8204,24 +8162,24 @@ static int ipw_wx_set_freq(struct net_device *dev,
8204 8162
8205 if (fwrq->m == 0) { 8163 if (fwrq->m == 0) {
8206 IPW_DEBUG_WX("SET Freq/Channel -> any\n"); 8164 IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8207 down(&priv->sem); 8165 mutex_lock(&priv->mutex);
8208 ret = ipw_set_channel(priv, 0); 8166 ret = ipw_set_channel(priv, 0);
8209 up(&priv->sem); 8167 mutex_unlock(&priv->mutex);
8210 return ret; 8168 return ret;
8211 } 8169 }
8212 /* if setting by freq convert to channel */ 8170 /* if setting by freq convert to channel */
8213 if (fwrq->e == 1) { 8171 if (fwrq->e == 1) {
8214 channel = ipw_freq_to_channel(priv->ieee, fwrq->m); 8172 channel = ieee80211_freq_to_channel(priv->ieee, fwrq->m);
8215 if (channel == 0) 8173 if (channel == 0)
8216 return -EINVAL; 8174 return -EINVAL;
8217 } else 8175 } else
8218 channel = fwrq->m; 8176 channel = fwrq->m;
8219 8177
8220 if (!(band = ipw_is_valid_channel(priv->ieee, channel))) 8178 if (!(band = ieee80211_is_valid_channel(priv->ieee, channel)))
8221 return -EINVAL; 8179 return -EINVAL;
8222 8180
8223 if (priv->ieee->iw_mode == IW_MODE_ADHOC) { 8181 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8224 i = ipw_channel_to_index(priv->ieee, channel); 8182 i = ieee80211_channel_to_index(priv->ieee, channel);
8225 if (i == -1) 8183 if (i == -1)
8226 return -EINVAL; 8184 return -EINVAL;
8227 8185
@@ -8234,9 +8192,9 @@ static int ipw_wx_set_freq(struct net_device *dev,
8234 } 8192 }
8235 8193
8236 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m); 8194 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
8237 down(&priv->sem); 8195 mutex_lock(&priv->mutex);
8238 ret = ipw_set_channel(priv, channel); 8196 ret = ipw_set_channel(priv, channel);
8239 up(&priv->sem); 8197 mutex_unlock(&priv->mutex);
8240 return ret; 8198 return ret;
8241} 8199}
8242 8200
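
For reference, the fwrq->e == 1 branch above expects the wireless-extensions
encoding value = m * 10^e, matching the freq * 100000, e = 1 form this driver
reports in SIOCGIWRANGE further down.  One request it would accept for 2.4 GHz
channel 6 (2437 MHz) is sketched below; passing a bare channel number instead
uses e = 0:

        struct iw_freq f = {
                .m = 243700000,         /* 2437 MHz as m * 10^e ...       */
                .e = 1,                 /* ... with e = 1  ->  2.437e9 Hz */
        };
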
@@ -8250,14 +8208,14 @@ static int ipw_wx_get_freq(struct net_device *dev,
8250 8208
8251 /* If we are associated, trying to associate, or have a statically 8209 /* If we are associated, trying to associate, or have a statically
8252 * configured CHANNEL then return that; otherwise return ANY */ 8210 * configured CHANNEL then return that; otherwise return ANY */
8253 down(&priv->sem); 8211 mutex_lock(&priv->mutex);
8254 if (priv->config & CFG_STATIC_CHANNEL || 8212 if (priv->config & CFG_STATIC_CHANNEL ||
8255 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) 8213 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED))
8256 wrqu->freq.m = priv->channel; 8214 wrqu->freq.m = priv->channel;
8257 else 8215 else
8258 wrqu->freq.m = 0; 8216 wrqu->freq.m = 0;
8259 8217
8260 up(&priv->sem); 8218 mutex_unlock(&priv->mutex);
8261 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel); 8219 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
8262 return 0; 8220 return 0;
8263} 8221}
@@ -8287,7 +8245,7 @@ static int ipw_wx_set_mode(struct net_device *dev,
8287 if (wrqu->mode == priv->ieee->iw_mode) 8245 if (wrqu->mode == priv->ieee->iw_mode)
8288 return 0; 8246 return 0;
8289 8247
8290 down(&priv->sem); 8248 mutex_lock(&priv->mutex);
8291 8249
8292 ipw_sw_reset(priv, 0); 8250 ipw_sw_reset(priv, 0);
8293 8251
@@ -8310,7 +8268,7 @@ static int ipw_wx_set_mode(struct net_device *dev,
8310 priv->ieee->iw_mode = wrqu->mode; 8268 priv->ieee->iw_mode = wrqu->mode;
8311 8269
8312 queue_work(priv->workqueue, &priv->adapter_restart); 8270 queue_work(priv->workqueue, &priv->adapter_restart);
8313 up(&priv->sem); 8271 mutex_unlock(&priv->mutex);
8314 return err; 8272 return err;
8315} 8273}
8316 8274
@@ -8319,10 +8277,10 @@ static int ipw_wx_get_mode(struct net_device *dev,
8319 union iwreq_data *wrqu, char *extra) 8277 union iwreq_data *wrqu, char *extra)
8320{ 8278{
8321 struct ipw_priv *priv = ieee80211_priv(dev); 8279 struct ipw_priv *priv = ieee80211_priv(dev);
8322 down(&priv->sem); 8280 mutex_lock(&priv->mutex);
8323 wrqu->mode = priv->ieee->iw_mode; 8281 wrqu->mode = priv->ieee->iw_mode;
8324 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode); 8282 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8325 up(&priv->sem); 8283 mutex_unlock(&priv->mutex);
8326 return 0; 8284 return 0;
8327} 8285}
8328 8286
@@ -8349,7 +8307,7 @@ static int ipw_wx_get_range(struct net_device *dev,
8349{ 8307{
8350 struct ipw_priv *priv = ieee80211_priv(dev); 8308 struct ipw_priv *priv = ieee80211_priv(dev);
8351 struct iw_range *range = (struct iw_range *)extra; 8309 struct iw_range *range = (struct iw_range *)extra;
8352 const struct ieee80211_geo *geo = ipw_get_geo(priv->ieee); 8310 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
8353 int i = 0, j; 8311 int i = 0, j;
8354 8312
8355 wrqu->data.length = sizeof(*range); 8313 wrqu->data.length = sizeof(*range);
@@ -8361,7 +8319,7 @@ static int ipw_wx_get_range(struct net_device *dev,
8361 range->max_qual.qual = 100; 8319 range->max_qual.qual = 100;
8362 /* TODO: Find real max RSSI and stick here */ 8320 /* TODO: Find real max RSSI and stick here */
8363 range->max_qual.level = 0; 8321 range->max_qual.level = 0;
8364 range->max_qual.noise = priv->ieee->worst_rssi + 0x100; 8322 range->max_qual.noise = 0;
8365 range->max_qual.updated = 7; /* Updated all three */ 8323 range->max_qual.updated = 7; /* Updated all three */
8366 8324
8367 range->avg_qual.qual = 70; 8325 range->avg_qual.qual = 70;
@@ -8369,7 +8327,7 @@ static int ipw_wx_get_range(struct net_device *dev,
8369 range->avg_qual.level = 0; /* FIXME to real average level */ 8327 range->avg_qual.level = 0; /* FIXME to real average level */
8370 range->avg_qual.noise = 0; 8328 range->avg_qual.noise = 0;
8371 range->avg_qual.updated = 7; /* Updated all three */ 8329 range->avg_qual.updated = 7; /* Updated all three */
8372 down(&priv->sem); 8330 mutex_lock(&priv->mutex);
8373 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES); 8331 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8374 8332
8375 for (i = 0; i < range->num_bitrates; i++) 8333 for (i = 0; i < range->num_bitrates; i++)
@@ -8387,31 +8345,39 @@ static int ipw_wx_get_range(struct net_device *dev,
 
         /* Set the Wireless Extension versions */
         range->we_version_compiled = WIRELESS_EXT;
-        range->we_version_source = 16;
+        range->we_version_source = 18;
 
         i = 0;
         if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
-                for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES;
-                     i++, j++) {
+                for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
+                        if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
+                            (geo->bg[j].flags & IEEE80211_CH_PASSIVE_ONLY))
+                                continue;
+
                         range->freq[i].i = geo->bg[j].channel;
                         range->freq[i].m = geo->bg[j].freq * 100000;
                         range->freq[i].e = 1;
+                        i++;
                 }
         }
 
         if (priv->ieee->mode & IEEE_A) {
-                for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES;
-                     i++, j++) {
+                for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
+                        if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
+                            (geo->a[j].flags & IEEE80211_CH_PASSIVE_ONLY))
+                                continue;
+
                         range->freq[i].i = geo->a[j].channel;
                         range->freq[i].m = geo->a[j].freq * 100000;
                         range->freq[i].e = 1;
+                        i++;
                 }
         }
 
         range->num_channels = i;
         range->num_frequency = i;
 
-        up(&priv->sem);
+        mutex_unlock(&priv->mutex);
 
         /* Event capability (kernel + driver) */
         range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
@@ -8419,6 +8385,9 @@ static int ipw_wx_get_range(struct net_device *dev,
8419 IW_EVENT_CAPA_MASK(SIOCGIWAP)); 8385 IW_EVENT_CAPA_MASK(SIOCGIWAP));
8420 range->event_capa[1] = IW_EVENT_CAPA_K_1; 8386 range->event_capa[1] = IW_EVENT_CAPA_K_1;
8421 8387
8388 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
8389 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
8390
8422 IPW_DEBUG_WX("GET Range\n"); 8391 IPW_DEBUG_WX("GET Range\n");
8423 return 0; 8392 return 0;
8424} 8393}
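
Bumping we_version_source to 18 and filling range->enc_capa lets a WEXT-18
supplicant discover WPA/WPA2 and TKIP/CCMP support through SIOCGIWRANGE.  A
rough userspace sketch of reading those bits; the interface name is
hypothetical, sock is an already-open datagram socket, and error handling is
omitted:

        struct iwreq req;
        struct iw_range range;

        memset(&req, 0, sizeof(req));
        memset(&range, 0, sizeof(range));
        strncpy(req.ifr_name, "eth1", IFNAMSIZ);        /* hypothetical ifname */
        req.u.data.pointer = (void *)&range;
        req.u.data.length = sizeof(range);

        if (ioctl(sock, SIOCGIWRANGE, &req) == 0 &&
            (range.enc_capa & IW_ENC_CAPA_WPA2))
                printf("driver advertises WPA2/CCMP capability\n");
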
@@ -8438,7 +8407,7 @@ static int ipw_wx_set_wap(struct net_device *dev,
8438 8407
8439 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER) 8408 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
8440 return -EINVAL; 8409 return -EINVAL;
8441 down(&priv->sem); 8410 mutex_lock(&priv->mutex);
8442 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) || 8411 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
8443 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) { 8412 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8444 /* we disable mandatory BSSID association */ 8413 /* we disable mandatory BSSID association */
@@ -8447,14 +8416,14 @@ static int ipw_wx_set_wap(struct net_device *dev,
8447 IPW_DEBUG_ASSOC("Attempting to associate with new " 8416 IPW_DEBUG_ASSOC("Attempting to associate with new "
8448 "parameters.\n"); 8417 "parameters.\n");
8449 ipw_associate(priv); 8418 ipw_associate(priv);
8450 up(&priv->sem); 8419 mutex_unlock(&priv->mutex);
8451 return 0; 8420 return 0;
8452 } 8421 }
8453 8422
8454 priv->config |= CFG_STATIC_BSSID; 8423 priv->config |= CFG_STATIC_BSSID;
8455 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) { 8424 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8456 IPW_DEBUG_WX("BSSID set to current BSSID.\n"); 8425 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
8457 up(&priv->sem); 8426 mutex_unlock(&priv->mutex);
8458 return 0; 8427 return 0;
8459 } 8428 }
8460 8429
@@ -8468,7 +8437,7 @@ static int ipw_wx_set_wap(struct net_device *dev,
8468 if (!ipw_disassociate(priv)) 8437 if (!ipw_disassociate(priv))
8469 ipw_associate(priv); 8438 ipw_associate(priv);
8470 8439
8471 up(&priv->sem); 8440 mutex_unlock(&priv->mutex);
8472 return 0; 8441 return 0;
8473} 8442}
8474 8443
@@ -8479,7 +8448,7 @@ static int ipw_wx_get_wap(struct net_device *dev,
8479 struct ipw_priv *priv = ieee80211_priv(dev); 8448 struct ipw_priv *priv = ieee80211_priv(dev);
8480 /* If we are associated, trying to associate, or have a statically 8449 /* If we are associated, trying to associate, or have a statically
8481 * configured BSSID then return that; otherwise return ANY */ 8450 * configured BSSID then return that; otherwise return ANY */
8482 down(&priv->sem); 8451 mutex_lock(&priv->mutex);
8483 if (priv->config & CFG_STATIC_BSSID || 8452 if (priv->config & CFG_STATIC_BSSID ||
8484 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) { 8453 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8485 wrqu->ap_addr.sa_family = ARPHRD_ETHER; 8454 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
@@ -8489,7 +8458,7 @@ static int ipw_wx_get_wap(struct net_device *dev,
8489 8458
8490 IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n", 8459 IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n",
8491 MAC_ARG(wrqu->ap_addr.sa_data)); 8460 MAC_ARG(wrqu->ap_addr.sa_data));
8492 up(&priv->sem); 8461 mutex_unlock(&priv->mutex);
8493 return 0; 8462 return 0;
8494} 8463}
8495 8464
@@ -8500,7 +8469,7 @@ static int ipw_wx_set_essid(struct net_device *dev,
8500 struct ipw_priv *priv = ieee80211_priv(dev); 8469 struct ipw_priv *priv = ieee80211_priv(dev);
8501 char *essid = ""; /* ANY */ 8470 char *essid = ""; /* ANY */
8502 int length = 0; 8471 int length = 0;
8503 down(&priv->sem); 8472 mutex_lock(&priv->mutex);
8504 if (wrqu->essid.flags && wrqu->essid.length) { 8473 if (wrqu->essid.flags && wrqu->essid.length) {
8505 length = wrqu->essid.length - 1; 8474 length = wrqu->essid.length - 1;
8506 essid = extra; 8475 essid = extra;
@@ -8515,7 +8484,7 @@ static int ipw_wx_set_essid(struct net_device *dev,
8515 priv->config &= ~CFG_STATIC_ESSID; 8484 priv->config &= ~CFG_STATIC_ESSID;
8516 ipw_associate(priv); 8485 ipw_associate(priv);
8517 } 8486 }
8518 up(&priv->sem); 8487 mutex_unlock(&priv->mutex);
8519 return 0; 8488 return 0;
8520 } 8489 }
8521 8490
@@ -8525,7 +8494,7 @@ static int ipw_wx_set_essid(struct net_device *dev,
8525 8494
8526 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) { 8495 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) {
8527 IPW_DEBUG_WX("ESSID set to current ESSID.\n"); 8496 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
8528 up(&priv->sem); 8497 mutex_unlock(&priv->mutex);
8529 return 0; 8498 return 0;
8530 } 8499 }
8531 8500
@@ -8540,7 +8509,7 @@ static int ipw_wx_set_essid(struct net_device *dev,
8540 if (!ipw_disassociate(priv)) 8509 if (!ipw_disassociate(priv))
8541 ipw_associate(priv); 8510 ipw_associate(priv);
8542 8511
8543 up(&priv->sem); 8512 mutex_unlock(&priv->mutex);
8544 return 0; 8513 return 0;
8545} 8514}
8546 8515
@@ -8552,7 +8521,7 @@ static int ipw_wx_get_essid(struct net_device *dev,
8552 8521
8553 /* If we are associated, trying to associate, or have a statically 8522 /* If we are associated, trying to associate, or have a statically
8554 * configured ESSID then return that; otherwise return ANY */ 8523 * configured ESSID then return that; otherwise return ANY */
8555 down(&priv->sem); 8524 mutex_lock(&priv->mutex);
8556 if (priv->config & CFG_STATIC_ESSID || 8525 if (priv->config & CFG_STATIC_ESSID ||
8557 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) { 8526 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8558 IPW_DEBUG_WX("Getting essid: '%s'\n", 8527 IPW_DEBUG_WX("Getting essid: '%s'\n",
@@ -8565,7 +8534,7 @@ static int ipw_wx_get_essid(struct net_device *dev,
8565 wrqu->essid.length = 0; 8534 wrqu->essid.length = 0;
8566 wrqu->essid.flags = 0; /* active */ 8535 wrqu->essid.flags = 0; /* active */
8567 } 8536 }
8568 up(&priv->sem); 8537 mutex_unlock(&priv->mutex);
8569 return 0; 8538 return 0;
8570} 8539}
8571 8540
@@ -8578,12 +8547,12 @@ static int ipw_wx_set_nick(struct net_device *dev,
8578 IPW_DEBUG_WX("Setting nick to '%s'\n", extra); 8547 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
8579 if (wrqu->data.length > IW_ESSID_MAX_SIZE) 8548 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
8580 return -E2BIG; 8549 return -E2BIG;
8581 down(&priv->sem); 8550 mutex_lock(&priv->mutex);
8582 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick)); 8551 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
8583 memset(priv->nick, 0, sizeof(priv->nick)); 8552 memset(priv->nick, 0, sizeof(priv->nick));
8584 memcpy(priv->nick, extra, wrqu->data.length); 8553 memcpy(priv->nick, extra, wrqu->data.length);
8585 IPW_DEBUG_TRACE("<<\n"); 8554 IPW_DEBUG_TRACE("<<\n");
8586 up(&priv->sem); 8555 mutex_unlock(&priv->mutex);
8587 return 0; 8556 return 0;
8588 8557
8589} 8558}
@@ -8594,11 +8563,57 @@ static int ipw_wx_get_nick(struct net_device *dev,
8594{ 8563{
8595 struct ipw_priv *priv = ieee80211_priv(dev); 8564 struct ipw_priv *priv = ieee80211_priv(dev);
8596 IPW_DEBUG_WX("Getting nick\n"); 8565 IPW_DEBUG_WX("Getting nick\n");
8597 down(&priv->sem); 8566 mutex_lock(&priv->mutex);
8598 wrqu->data.length = strlen(priv->nick) + 1; 8567 wrqu->data.length = strlen(priv->nick) + 1;
8599 memcpy(extra, priv->nick, wrqu->data.length); 8568 memcpy(extra, priv->nick, wrqu->data.length);
8600 wrqu->data.flags = 1; /* active */ 8569 wrqu->data.flags = 1; /* active */
8601 up(&priv->sem); 8570 mutex_unlock(&priv->mutex);
8571 return 0;
8572}
8573
8574static int ipw_wx_set_sens(struct net_device *dev,
8575 struct iw_request_info *info,
8576 union iwreq_data *wrqu, char *extra)
8577{
8578 struct ipw_priv *priv = ieee80211_priv(dev);
8579 int err = 0;
8580
8581 IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
8582 IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
8583 mutex_lock(&priv->mutex);
8584
8585 if (wrqu->sens.fixed == 0)
8586 {
8587 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8588 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8589 goto out;
8590 }
8591 if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
8592 (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
8593 err = -EINVAL;
8594 goto out;
8595 }
8596
8597 priv->roaming_threshold = wrqu->sens.value;
8598 priv->disassociate_threshold = 3*wrqu->sens.value;
8599 out:
8600 mutex_unlock(&priv->mutex);
8601 return err;
8602}
8603
8604static int ipw_wx_get_sens(struct net_device *dev,
8605 struct iw_request_info *info,
8606 union iwreq_data *wrqu, char *extra)
8607{
8608 struct ipw_priv *priv = ieee80211_priv(dev);
8609 mutex_lock(&priv->mutex);
8610 wrqu->sens.fixed = 1;
8611 wrqu->sens.value = priv->roaming_threshold;
8612 mutex_unlock(&priv->mutex);
8613
8614 IPW_DEBUG_WX("GET roaming threshold -> %s %d \n",
8615 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
8616
8602 return 0; 8617 return 0;
8603} 8618}
8604 8619
@@ -8691,7 +8706,7 @@ static int ipw_wx_set_rate(struct net_device *dev,
8691 apply: 8706 apply:
8692 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n", 8707 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
8693 mask, fixed ? "fixed" : "sub-rates"); 8708 mask, fixed ? "fixed" : "sub-rates");
8694 down(&priv->sem); 8709 mutex_lock(&priv->mutex);
8695 if (mask == IEEE80211_DEFAULT_RATES_MASK) { 8710 if (mask == IEEE80211_DEFAULT_RATES_MASK) {
8696 priv->config &= ~CFG_FIXED_RATE; 8711 priv->config &= ~CFG_FIXED_RATE;
8697 ipw_set_fixed_rate(priv, priv->ieee->mode); 8712 ipw_set_fixed_rate(priv, priv->ieee->mode);
@@ -8700,7 +8715,7 @@ static int ipw_wx_set_rate(struct net_device *dev,
8700 8715
8701 if (priv->rates_mask == mask) { 8716 if (priv->rates_mask == mask) {
8702 IPW_DEBUG_WX("Mask set to current mask.\n"); 8717 IPW_DEBUG_WX("Mask set to current mask.\n");
8703 up(&priv->sem); 8718 mutex_unlock(&priv->mutex);
8704 return 0; 8719 return 0;
8705 } 8720 }
8706 8721
@@ -8711,7 +8726,7 @@ static int ipw_wx_set_rate(struct net_device *dev,
8711 if (!ipw_disassociate(priv)) 8726 if (!ipw_disassociate(priv))
8712 ipw_associate(priv); 8727 ipw_associate(priv);
8713 8728
8714 up(&priv->sem); 8729 mutex_unlock(&priv->mutex);
8715 return 0; 8730 return 0;
8716} 8731}
8717 8732
@@ -8720,9 +8735,9 @@ static int ipw_wx_get_rate(struct net_device *dev,
8720 union iwreq_data *wrqu, char *extra) 8735 union iwreq_data *wrqu, char *extra)
8721{ 8736{
8722 struct ipw_priv *priv = ieee80211_priv(dev); 8737 struct ipw_priv *priv = ieee80211_priv(dev);
8723 down(&priv->sem); 8738 mutex_lock(&priv->mutex);
8724 wrqu->bitrate.value = priv->last_rate; 8739 wrqu->bitrate.value = priv->last_rate;
8725 up(&priv->sem); 8740 mutex_unlock(&priv->mutex);
8726 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value); 8741 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
8727 return 0; 8742 return 0;
8728} 8743}
@@ -8732,20 +8747,20 @@ static int ipw_wx_set_rts(struct net_device *dev,
8732 union iwreq_data *wrqu, char *extra) 8747 union iwreq_data *wrqu, char *extra)
8733{ 8748{
8734 struct ipw_priv *priv = ieee80211_priv(dev); 8749 struct ipw_priv *priv = ieee80211_priv(dev);
8735 down(&priv->sem); 8750 mutex_lock(&priv->mutex);
8736 if (wrqu->rts.disabled) 8751 if (wrqu->rts.disabled)
8737 priv->rts_threshold = DEFAULT_RTS_THRESHOLD; 8752 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8738 else { 8753 else {
8739 if (wrqu->rts.value < MIN_RTS_THRESHOLD || 8754 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
8740 wrqu->rts.value > MAX_RTS_THRESHOLD) { 8755 wrqu->rts.value > MAX_RTS_THRESHOLD) {
8741 up(&priv->sem); 8756 mutex_unlock(&priv->mutex);
8742 return -EINVAL; 8757 return -EINVAL;
8743 } 8758 }
8744 priv->rts_threshold = wrqu->rts.value; 8759 priv->rts_threshold = wrqu->rts.value;
8745 } 8760 }
8746 8761
8747 ipw_send_rts_threshold(priv, priv->rts_threshold); 8762 ipw_send_rts_threshold(priv, priv->rts_threshold);
8748 up(&priv->sem); 8763 mutex_unlock(&priv->mutex);
8749 IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold); 8764 IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
8750 return 0; 8765 return 0;
8751} 8766}
@@ -8755,11 +8770,11 @@ static int ipw_wx_get_rts(struct net_device *dev,
8755 union iwreq_data *wrqu, char *extra) 8770 union iwreq_data *wrqu, char *extra)
8756{ 8771{
8757 struct ipw_priv *priv = ieee80211_priv(dev); 8772 struct ipw_priv *priv = ieee80211_priv(dev);
8758 down(&priv->sem); 8773 mutex_lock(&priv->mutex);
8759 wrqu->rts.value = priv->rts_threshold; 8774 wrqu->rts.value = priv->rts_threshold;
8760 wrqu->rts.fixed = 0; /* no auto select */ 8775 wrqu->rts.fixed = 0; /* no auto select */
8761 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD); 8776 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
8762 up(&priv->sem); 8777 mutex_unlock(&priv->mutex);
8763 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value); 8778 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
8764 return 0; 8779 return 0;
8765} 8780}
@@ -8771,7 +8786,7 @@ static int ipw_wx_set_txpow(struct net_device *dev,
8771 struct ipw_priv *priv = ieee80211_priv(dev); 8786 struct ipw_priv *priv = ieee80211_priv(dev);
8772 int err = 0; 8787 int err = 0;
8773 8788
8774 down(&priv->sem); 8789 mutex_lock(&priv->mutex);
8775 if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) { 8790 if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
8776 err = -EINPROGRESS; 8791 err = -EINPROGRESS;
8777 goto out; 8792 goto out;
@@ -8794,7 +8809,7 @@ static int ipw_wx_set_txpow(struct net_device *dev,
8794 priv->tx_power = wrqu->power.value; 8809 priv->tx_power = wrqu->power.value;
8795 err = ipw_set_tx_power(priv); 8810 err = ipw_set_tx_power(priv);
8796 out: 8811 out:
8797 up(&priv->sem); 8812 mutex_unlock(&priv->mutex);
8798 return err; 8813 return err;
8799} 8814}
8800 8815
@@ -8803,12 +8818,12 @@ static int ipw_wx_get_txpow(struct net_device *dev,
8803 union iwreq_data *wrqu, char *extra) 8818 union iwreq_data *wrqu, char *extra)
8804{ 8819{
8805 struct ipw_priv *priv = ieee80211_priv(dev); 8820 struct ipw_priv *priv = ieee80211_priv(dev);
8806 down(&priv->sem); 8821 mutex_lock(&priv->mutex);
8807 wrqu->power.value = priv->tx_power; 8822 wrqu->power.value = priv->tx_power;
8808 wrqu->power.fixed = 1; 8823 wrqu->power.fixed = 1;
8809 wrqu->power.flags = IW_TXPOW_DBM; 8824 wrqu->power.flags = IW_TXPOW_DBM;
8810 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0; 8825 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
8811 up(&priv->sem); 8826 mutex_unlock(&priv->mutex);
8812 8827
8813 IPW_DEBUG_WX("GET TX Power -> %s %d \n", 8828 IPW_DEBUG_WX("GET TX Power -> %s %d \n",
8814 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value); 8829 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
@@ -8821,13 +8836,13 @@ static int ipw_wx_set_frag(struct net_device *dev,
8821 union iwreq_data *wrqu, char *extra) 8836 union iwreq_data *wrqu, char *extra)
8822{ 8837{
8823 struct ipw_priv *priv = ieee80211_priv(dev); 8838 struct ipw_priv *priv = ieee80211_priv(dev);
8824 down(&priv->sem); 8839 mutex_lock(&priv->mutex);
8825 if (wrqu->frag.disabled) 8840 if (wrqu->frag.disabled)
8826 priv->ieee->fts = DEFAULT_FTS; 8841 priv->ieee->fts = DEFAULT_FTS;
8827 else { 8842 else {
8828 if (wrqu->frag.value < MIN_FRAG_THRESHOLD || 8843 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
8829 wrqu->frag.value > MAX_FRAG_THRESHOLD) { 8844 wrqu->frag.value > MAX_FRAG_THRESHOLD) {
8830 up(&priv->sem); 8845 mutex_unlock(&priv->mutex);
8831 return -EINVAL; 8846 return -EINVAL;
8832 } 8847 }
8833 8848
@@ -8835,7 +8850,7 @@ static int ipw_wx_set_frag(struct net_device *dev,
8835 } 8850 }
8836 8851
8837 ipw_send_frag_threshold(priv, wrqu->frag.value); 8852 ipw_send_frag_threshold(priv, wrqu->frag.value);
8838 up(&priv->sem); 8853 mutex_unlock(&priv->mutex);
8839 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value); 8854 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
8840 return 0; 8855 return 0;
8841} 8856}
@@ -8845,11 +8860,11 @@ static int ipw_wx_get_frag(struct net_device *dev,
8845 union iwreq_data *wrqu, char *extra) 8860 union iwreq_data *wrqu, char *extra)
8846{ 8861{
8847 struct ipw_priv *priv = ieee80211_priv(dev); 8862 struct ipw_priv *priv = ieee80211_priv(dev);
8848 down(&priv->sem); 8863 mutex_lock(&priv->mutex);
8849 wrqu->frag.value = priv->ieee->fts; 8864 wrqu->frag.value = priv->ieee->fts;
8850 wrqu->frag.fixed = 0; /* no auto select */ 8865 wrqu->frag.fixed = 0; /* no auto select */
8851 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS); 8866 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
8852 up(&priv->sem); 8867 mutex_unlock(&priv->mutex);
8853 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value); 8868 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
8854 8869
8855 return 0; 8870 return 0;
@@ -8870,7 +8885,7 @@ static int ipw_wx_set_retry(struct net_device *dev,
8870 if (wrqu->retry.value < 0 || wrqu->retry.value > 255) 8885 if (wrqu->retry.value < 0 || wrqu->retry.value > 255)
8871 return -EINVAL; 8886 return -EINVAL;
8872 8887
8873 down(&priv->sem); 8888 mutex_lock(&priv->mutex);
8874 if (wrqu->retry.flags & IW_RETRY_MIN) 8889 if (wrqu->retry.flags & IW_RETRY_MIN)
8875 priv->short_retry_limit = (u8) wrqu->retry.value; 8890 priv->short_retry_limit = (u8) wrqu->retry.value;
8876 else if (wrqu->retry.flags & IW_RETRY_MAX) 8891 else if (wrqu->retry.flags & IW_RETRY_MAX)
@@ -8882,7 +8897,7 @@ static int ipw_wx_set_retry(struct net_device *dev,
8882 8897
8883 ipw_send_retry_limit(priv, priv->short_retry_limit, 8898 ipw_send_retry_limit(priv, priv->short_retry_limit,
8884 priv->long_retry_limit); 8899 priv->long_retry_limit);
8885 up(&priv->sem); 8900 mutex_unlock(&priv->mutex);
8886 IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n", 8901 IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
8887 priv->short_retry_limit, priv->long_retry_limit); 8902 priv->short_retry_limit, priv->long_retry_limit);
8888 return 0; 8903 return 0;
@@ -8894,11 +8909,11 @@ static int ipw_wx_get_retry(struct net_device *dev,
8894{ 8909{
8895 struct ipw_priv *priv = ieee80211_priv(dev); 8910 struct ipw_priv *priv = ieee80211_priv(dev);
8896 8911
8897 down(&priv->sem); 8912 mutex_lock(&priv->mutex);
8898 wrqu->retry.disabled = 0; 8913 wrqu->retry.disabled = 0;
8899 8914
8900 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) { 8915 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
8901 up(&priv->sem); 8916 mutex_unlock(&priv->mutex);
8902 return -EINVAL; 8917 return -EINVAL;
8903 } 8918 }
8904 8919
@@ -8912,7 +8927,7 @@ static int ipw_wx_get_retry(struct net_device *dev,
8912 wrqu->retry.flags = IW_RETRY_LIMIT; 8927 wrqu->retry.flags = IW_RETRY_LIMIT;
8913 wrqu->retry.value = priv->short_retry_limit; 8928 wrqu->retry.value = priv->short_retry_limit;
8914 } 8929 }
8915 up(&priv->sem); 8930 mutex_unlock(&priv->mutex);
8916 8931
8917 IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value); 8932 IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value);
8918 8933
@@ -8929,7 +8944,7 @@ static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid,
8929 (priv->status & STATUS_EXIT_PENDING)) 8944 (priv->status & STATUS_EXIT_PENDING))
8930 return 0; 8945 return 0;
8931 8946
8932 down(&priv->sem); 8947 mutex_lock(&priv->mutex);
8933 8948
8934 if (priv->status & STATUS_RF_KILL_MASK) { 8949 if (priv->status & STATUS_RF_KILL_MASK) {
8935 IPW_DEBUG_HC("Aborting scan due to RF kill activation\n"); 8950 IPW_DEBUG_HC("Aborting scan due to RF kill activation\n");
@@ -8981,7 +8996,7 @@ static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid,
8981 priv->status |= STATUS_SCANNING; 8996 priv->status |= STATUS_SCANNING;
8982 8997
8983 done: 8998 done:
8984 up(&priv->sem); 8999 mutex_unlock(&priv->mutex);
8985 return err; 9000 return err;
8986} 9001}
8987 9002
@@ -9024,7 +9039,7 @@ static int ipw_wx_set_encode(struct net_device *dev,
9024 int ret; 9039 int ret;
9025 u32 cap = priv->capability; 9040 u32 cap = priv->capability;
9026 9041
9027 down(&priv->sem); 9042 mutex_lock(&priv->mutex);
9028 ret = ieee80211_wx_set_encode(priv->ieee, info, wrqu, key); 9043 ret = ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);
9029 9044
9030 /* In IBSS mode, we need to notify the firmware to update 9045 /* In IBSS mode, we need to notify the firmware to update
@@ -9034,7 +9049,7 @@ static int ipw_wx_set_encode(struct net_device *dev,
9034 priv->status & STATUS_ASSOCIATED) 9049 priv->status & STATUS_ASSOCIATED)
9035 ipw_disassociate(priv); 9050 ipw_disassociate(priv);
9036 9051
9037 up(&priv->sem); 9052 mutex_unlock(&priv->mutex);
9038 return ret; 9053 return ret;
9039} 9054}
9040 9055
@@ -9052,17 +9067,17 @@ static int ipw_wx_set_power(struct net_device *dev,
9052{ 9067{
9053 struct ipw_priv *priv = ieee80211_priv(dev); 9068 struct ipw_priv *priv = ieee80211_priv(dev);
9054 int err; 9069 int err;
9055 down(&priv->sem); 9070 mutex_lock(&priv->mutex);
9056 if (wrqu->power.disabled) { 9071 if (wrqu->power.disabled) {
9057 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode); 9072 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9058 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM); 9073 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9059 if (err) { 9074 if (err) {
9060 IPW_DEBUG_WX("failed setting power mode.\n"); 9075 IPW_DEBUG_WX("failed setting power mode.\n");
9061 up(&priv->sem); 9076 mutex_unlock(&priv->mutex);
9062 return err; 9077 return err;
9063 } 9078 }
9064 IPW_DEBUG_WX("SET Power Management Mode -> off\n"); 9079 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9065 up(&priv->sem); 9080 mutex_unlock(&priv->mutex);
9066 return 0; 9081 return 0;
9067 } 9082 }
9068 9083
@@ -9074,7 +9089,7 @@ static int ipw_wx_set_power(struct net_device *dev,
9074 default: /* Otherwise we don't support it */ 9089 default: /* Otherwise we don't support it */
9075 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n", 9090 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9076 wrqu->power.flags); 9091 wrqu->power.flags);
9077 up(&priv->sem); 9092 mutex_unlock(&priv->mutex);
9078 return -EOPNOTSUPP; 9093 return -EOPNOTSUPP;
9079 } 9094 }
9080 9095
@@ -9087,12 +9102,12 @@ static int ipw_wx_set_power(struct net_device *dev,
9087 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode)); 9102 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9088 if (err) { 9103 if (err) {
9089 IPW_DEBUG_WX("failed setting power mode.\n"); 9104 IPW_DEBUG_WX("failed setting power mode.\n");
9090 up(&priv->sem); 9105 mutex_unlock(&priv->mutex);
9091 return err; 9106 return err;
9092 } 9107 }
9093 9108
9094 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode); 9109 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9095 up(&priv->sem); 9110 mutex_unlock(&priv->mutex);
9096 return 0; 9111 return 0;
9097} 9112}
9098 9113
@@ -9101,13 +9116,13 @@ static int ipw_wx_get_power(struct net_device *dev,
9101 union iwreq_data *wrqu, char *extra) 9116 union iwreq_data *wrqu, char *extra)
9102{ 9117{
9103 struct ipw_priv *priv = ieee80211_priv(dev); 9118 struct ipw_priv *priv = ieee80211_priv(dev);
9104 down(&priv->sem); 9119 mutex_lock(&priv->mutex);
9105 if (!(priv->power_mode & IPW_POWER_ENABLED)) 9120 if (!(priv->power_mode & IPW_POWER_ENABLED))
9106 wrqu->power.disabled = 1; 9121 wrqu->power.disabled = 1;
9107 else 9122 else
9108 wrqu->power.disabled = 0; 9123 wrqu->power.disabled = 0;
9109 9124
9110 up(&priv->sem); 9125 mutex_unlock(&priv->mutex);
9111 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode); 9126 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9112 9127
9113 return 0; 9128 return 0;
@@ -9120,7 +9135,7 @@ static int ipw_wx_set_powermode(struct net_device *dev,
9120 struct ipw_priv *priv = ieee80211_priv(dev); 9135 struct ipw_priv *priv = ieee80211_priv(dev);
9121 int mode = *(int *)extra; 9136 int mode = *(int *)extra;
9122 int err; 9137 int err;
9123 down(&priv->sem); 9138 mutex_lock(&priv->mutex);
9124 if ((mode < 1) || (mode > IPW_POWER_LIMIT)) { 9139 if ((mode < 1) || (mode > IPW_POWER_LIMIT)) {
9125 mode = IPW_POWER_AC; 9140 mode = IPW_POWER_AC;
9126 priv->power_mode = mode; 9141 priv->power_mode = mode;
@@ -9133,11 +9148,11 @@ static int ipw_wx_set_powermode(struct net_device *dev,
9133 9148
9134 if (err) { 9149 if (err) {
9135 IPW_DEBUG_WX("failed setting power mode.\n"); 9150 IPW_DEBUG_WX("failed setting power mode.\n");
9136 up(&priv->sem); 9151 mutex_unlock(&priv->mutex);
9137 return err; 9152 return err;
9138 } 9153 }
9139 } 9154 }
9140 up(&priv->sem); 9155 mutex_unlock(&priv->mutex);
9141 return 0; 9156 return 0;
9142} 9157}
9143 9158
@@ -9186,7 +9201,7 @@ static int ipw_wx_set_wireless_mode(struct net_device *dev,
9186 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode); 9201 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9187 return -EINVAL; 9202 return -EINVAL;
9188 } 9203 }
9189 down(&priv->sem); 9204 mutex_lock(&priv->mutex);
9190 if (priv->adapter == IPW_2915ABG) { 9205 if (priv->adapter == IPW_2915ABG) {
9191 priv->ieee->abg_true = 1; 9206 priv->ieee->abg_true = 1;
9192 if (mode & IEEE_A) { 9207 if (mode & IEEE_A) {
@@ -9198,7 +9213,7 @@ static int ipw_wx_set_wireless_mode(struct net_device *dev,
9198 if (mode & IEEE_A) { 9213 if (mode & IEEE_A) {
9199 IPW_WARNING("Attempt to set 2200BG into " 9214 IPW_WARNING("Attempt to set 2200BG into "
9200 "802.11a mode\n"); 9215 "802.11a mode\n");
9201 up(&priv->sem); 9216 mutex_unlock(&priv->mutex);
9202 return -EINVAL; 9217 return -EINVAL;
9203 } 9218 }
9204 9219
@@ -9235,7 +9250,7 @@ static int ipw_wx_set_wireless_mode(struct net_device *dev,
9235 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n", 9250 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9236 mode & IEEE_A ? 'a' : '.', 9251 mode & IEEE_A ? 'a' : '.',
9237 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.'); 9252 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9238 up(&priv->sem); 9253 mutex_unlock(&priv->mutex);
9239 return 0; 9254 return 0;
9240} 9255}
9241 9256
@@ -9244,7 +9259,7 @@ static int ipw_wx_get_wireless_mode(struct net_device *dev,
9244 union iwreq_data *wrqu, char *extra) 9259 union iwreq_data *wrqu, char *extra)
9245{ 9260{
9246 struct ipw_priv *priv = ieee80211_priv(dev); 9261 struct ipw_priv *priv = ieee80211_priv(dev);
9247 down(&priv->sem); 9262 mutex_lock(&priv->mutex);
9248 switch (priv->ieee->mode) { 9263 switch (priv->ieee->mode) {
9249 case IEEE_A: 9264 case IEEE_A:
9250 strncpy(extra, "802.11a (1)", MAX_WX_STRING); 9265 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
@@ -9275,7 +9290,7 @@ static int ipw_wx_get_wireless_mode(struct net_device *dev,
9275 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra); 9290 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9276 9291
9277 wrqu->data.length = strlen(extra) + 1; 9292 wrqu->data.length = strlen(extra) + 1;
9278 up(&priv->sem); 9293 mutex_unlock(&priv->mutex);
9279 9294
9280 return 0; 9295 return 0;
9281} 9296}
@@ -9286,7 +9301,7 @@ static int ipw_wx_set_preamble(struct net_device *dev,
9286{ 9301{
9287 struct ipw_priv *priv = ieee80211_priv(dev); 9302 struct ipw_priv *priv = ieee80211_priv(dev);
9288 int mode = *(int *)extra; 9303 int mode = *(int *)extra;
9289 down(&priv->sem); 9304 mutex_lock(&priv->mutex);
9290 /* Switching from SHORT -> LONG requires a disassociation */ 9305 /* Switching from SHORT -> LONG requires a disassociation */
9291 if (mode == 1) { 9306 if (mode == 1) {
9292 if (!(priv->config & CFG_PREAMBLE_LONG)) { 9307 if (!(priv->config & CFG_PREAMBLE_LONG)) {
@@ -9305,11 +9320,11 @@ static int ipw_wx_set_preamble(struct net_device *dev,
9305 priv->config &= ~CFG_PREAMBLE_LONG; 9320 priv->config &= ~CFG_PREAMBLE_LONG;
9306 goto done; 9321 goto done;
9307 } 9322 }
9308 up(&priv->sem); 9323 mutex_unlock(&priv->mutex);
9309 return -EINVAL; 9324 return -EINVAL;
9310 9325
9311 done: 9326 done:
9312 up(&priv->sem); 9327 mutex_unlock(&priv->mutex);
9313 return 0; 9328 return 0;
9314} 9329}
9315 9330
@@ -9318,12 +9333,12 @@ static int ipw_wx_get_preamble(struct net_device *dev,
9318 union iwreq_data *wrqu, char *extra) 9333 union iwreq_data *wrqu, char *extra)
9319{ 9334{
9320 struct ipw_priv *priv = ieee80211_priv(dev); 9335 struct ipw_priv *priv = ieee80211_priv(dev);
9321 down(&priv->sem); 9336 mutex_lock(&priv->mutex);
9322 if (priv->config & CFG_PREAMBLE_LONG) 9337 if (priv->config & CFG_PREAMBLE_LONG)
9323 snprintf(wrqu->name, IFNAMSIZ, "long (1)"); 9338 snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9324 else 9339 else
9325 snprintf(wrqu->name, IFNAMSIZ, "auto (0)"); 9340 snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9326 up(&priv->sem); 9341 mutex_unlock(&priv->mutex);
9327 return 0; 9342 return 0;
9328} 9343}
9329 9344
@@ -9335,7 +9350,7 @@ static int ipw_wx_set_monitor(struct net_device *dev,
9335 struct ipw_priv *priv = ieee80211_priv(dev); 9350 struct ipw_priv *priv = ieee80211_priv(dev);
9336 int *parms = (int *)extra; 9351 int *parms = (int *)extra;
9337 int enable = (parms[0] > 0); 9352 int enable = (parms[0] > 0);
9338 down(&priv->sem); 9353 mutex_lock(&priv->mutex);
9339 IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]); 9354 IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9340 if (enable) { 9355 if (enable) {
9341 if (priv->ieee->iw_mode != IW_MODE_MONITOR) { 9356 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
@@ -9350,13 +9365,13 @@ static int ipw_wx_set_monitor(struct net_device *dev,
9350 ipw_set_channel(priv, parms[1]); 9365 ipw_set_channel(priv, parms[1]);
9351 } else { 9366 } else {
9352 if (priv->ieee->iw_mode != IW_MODE_MONITOR) { 9367 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9353 up(&priv->sem); 9368 mutex_unlock(&priv->mutex);
9354 return 0; 9369 return 0;
9355 } 9370 }
9356 priv->net_dev->type = ARPHRD_ETHER; 9371 priv->net_dev->type = ARPHRD_ETHER;
9357 queue_work(priv->workqueue, &priv->adapter_restart); 9372 queue_work(priv->workqueue, &priv->adapter_restart);
9358 } 9373 }
9359 up(&priv->sem); 9374 mutex_unlock(&priv->mutex);
9360 return 0; 9375 return 0;
9361} 9376}
9362 9377
@@ -9386,9 +9401,9 @@ static int ipw_wx_sw_reset(struct net_device *dev,
9386 9401
9387 IPW_DEBUG_WX("SW_RESET\n"); 9402 IPW_DEBUG_WX("SW_RESET\n");
9388 9403
9389 down(&priv->sem); 9404 mutex_lock(&priv->mutex);
9390 9405
9391 ret = ipw_sw_reset(priv, 0); 9406 ret = ipw_sw_reset(priv, 2);
9392 if (!ret) { 9407 if (!ret) {
9393 free_firmware(); 9408 free_firmware();
9394 ipw_adapter_restart(priv); 9409 ipw_adapter_restart(priv);
@@ -9398,9 +9413,9 @@ static int ipw_wx_sw_reset(struct net_device *dev,
9398 * module parameter, so take appropriate action */ 9413 * module parameter, so take appropriate action */
9399 ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW); 9414 ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9400 9415
9401 up(&priv->sem); 9416 mutex_unlock(&priv->mutex);
9402 ieee80211_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL); 9417 ieee80211_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9403 down(&priv->sem); 9418 mutex_lock(&priv->mutex);
9404 9419
9405 if (!(priv->status & STATUS_RF_KILL_MASK)) { 9420 if (!(priv->status & STATUS_RF_KILL_MASK)) {
9406 /* Configuration likely changed -- force [re]association */ 9421 /* Configuration likely changed -- force [re]association */
@@ -9410,7 +9425,7 @@ static int ipw_wx_sw_reset(struct net_device *dev,
9410 ipw_associate(priv); 9425 ipw_associate(priv);
9411 } 9426 }
9412 9427
9413 up(&priv->sem); 9428 mutex_unlock(&priv->mutex);
9414 9429
9415 return 0; 9430 return 0;
9416} 9431}
@@ -9423,6 +9438,8 @@ static iw_handler ipw_wx_handlers[] = {
9423 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq, 9438 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
9424 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode, 9439 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
9425 IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode, 9440 IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
9441 IW_IOCTL(SIOCSIWSENS) = ipw_wx_set_sens,
9442 IW_IOCTL(SIOCGIWSENS) = ipw_wx_get_sens,
9426 IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range, 9443 IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
9427 IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap, 9444 IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
9428 IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap, 9445 IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
@@ -9568,7 +9585,7 @@ static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
9568 wstats->qual.level = average_value(&priv->average_rssi); 9585 wstats->qual.level = average_value(&priv->average_rssi);
9569 wstats->qual.noise = average_value(&priv->average_noise); 9586 wstats->qual.noise = average_value(&priv->average_noise);
9570 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | 9587 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
9571 IW_QUAL_NOISE_UPDATED; 9588 IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
9572 9589
9573 wstats->miss.beacon = average_value(&priv->average_missed_beacons); 9590 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
9574 wstats->discard.retries = priv->last_tx_failures; 9591 wstats->discard.retries = priv->last_tx_failures;
@@ -9586,7 +9603,7 @@ static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
9586static void init_sys_config(struct ipw_sys_config *sys_config) 9603static void init_sys_config(struct ipw_sys_config *sys_config)
9587{ 9604{
9588 memset(sys_config, 0, sizeof(struct ipw_sys_config)); 9605 memset(sys_config, 0, sizeof(struct ipw_sys_config));
9589 sys_config->bt_coexistence = 1; /* We may need to look into prvStaBtConfig */ 9606 sys_config->bt_coexistence = 0;
9590 sys_config->answer_broadcast_ssid_probe = 0; 9607 sys_config->answer_broadcast_ssid_probe = 0;
9591 sys_config->accept_all_data_frames = 0; 9608 sys_config->accept_all_data_frames = 0;
9592 sys_config->accept_non_directed_frames = 1; 9609 sys_config->accept_non_directed_frames = 1;
@@ -9594,12 +9611,13 @@ static void init_sys_config(struct ipw_sys_config *sys_config)
9594 sys_config->disable_unicast_decryption = 1; 9611 sys_config->disable_unicast_decryption = 1;
9595 sys_config->exclude_multicast_unencrypted = 0; 9612 sys_config->exclude_multicast_unencrypted = 0;
9596 sys_config->disable_multicast_decryption = 1; 9613 sys_config->disable_multicast_decryption = 1;
9597 sys_config->antenna_diversity = CFG_SYS_ANTENNA_BOTH; 9614 sys_config->antenna_diversity = CFG_SYS_ANTENNA_SLOW_DIV;
9598 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */ 9615 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
9599 sys_config->dot11g_auto_detection = 0; 9616 sys_config->dot11g_auto_detection = 0;
9600 sys_config->enable_cts_to_self = 0; 9617 sys_config->enable_cts_to_self = 0;
9601 sys_config->bt_coexist_collision_thr = 0; 9618 sys_config->bt_coexist_collision_thr = 0;
9602 sys_config->pass_noise_stats_to_host = 1; //1 -- fix for 256 9619 sys_config->pass_noise_stats_to_host = 1; //1 -- fix for 256
9620 sys_config->silence_threshold = 0x1e;
9603} 9621}
9604 9622
9605static int ipw_net_open(struct net_device *dev) 9623static int ipw_net_open(struct net_device *dev)
@@ -9607,11 +9625,11 @@ static int ipw_net_open(struct net_device *dev)
9607 struct ipw_priv *priv = ieee80211_priv(dev); 9625 struct ipw_priv *priv = ieee80211_priv(dev);
9608 IPW_DEBUG_INFO("dev->open\n"); 9626 IPW_DEBUG_INFO("dev->open\n");
9609 /* we should be verifying the device is ready to be opened */ 9627 /* we should be verifying the device is ready to be opened */
9610 down(&priv->sem); 9628 mutex_lock(&priv->mutex);
9611 if (!(priv->status & STATUS_RF_KILL_MASK) && 9629 if (!(priv->status & STATUS_RF_KILL_MASK) &&
9612 (priv->status & STATUS_ASSOCIATED)) 9630 (priv->status & STATUS_ASSOCIATED))
9613 netif_start_queue(dev); 9631 netif_start_queue(dev);
9614 up(&priv->sem); 9632 mutex_unlock(&priv->mutex);
9615 return 0; 9633 return 0;
9616} 9634}
9617 9635
@@ -9647,11 +9665,6 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
9647 u16 remaining_bytes; 9665 u16 remaining_bytes;
9648 int fc; 9666 int fc;
9649 9667
9650 /* If there isn't room in the queue, we return busy and let the
9651 * network stack requeue the packet for us */
9652 if (ipw_queue_space(q) < q->high_mark)
9653 return NETDEV_TX_BUSY;
9654
9655 switch (priv->ieee->iw_mode) { 9668 switch (priv->ieee->iw_mode) {
9656 case IW_MODE_ADHOC: 9669 case IW_MODE_ADHOC:
9657 hdr_len = IEEE80211_3ADDR_LEN; 9670 hdr_len = IEEE80211_3ADDR_LEN;
@@ -9817,6 +9830,9 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
9817 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd); 9830 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
9818 ipw_write32(priv, q->reg_w, q->first_empty); 9831 ipw_write32(priv, q->reg_w, q->first_empty);
9819 9832
9833 if (ipw_queue_space(q) < q->high_mark)
9834 netif_stop_queue(priv->net_dev);
9835
9820 return NETDEV_TX_OK; 9836 return NETDEV_TX_OK;
9821 9837
9822 drop: 9838 drop:
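/*
 * A note on the two ipw_tx_skb() hunks above: flow control moves from
 * "return NETDEV_TX_BUSY when the ring is nearly full" (which makes the
 * network stack requeue the frame) to "accept the frame, then call
 * netif_stop_queue() once free descriptors drop below the high-water
 * mark".  A minimal sketch of that pattern follows; struct my_priv and
 * the my_ring_*() helpers are hypothetical stand-ins, not ipw2200 symbols.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_priv {
	struct net_device *dev;
	int high_mark;				/* descriptor low-space threshold */
};

/* Hypothetical ring helpers standing in for the driver's real queue code. */
extern int  my_ring_space(struct my_priv *priv);
extern void my_ring_queue(struct my_priv *priv, struct sk_buff *skb);
extern void my_ring_reclaim(struct my_priv *priv);

static int my_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	my_ring_queue(priv, skb);		/* place the frame on the DMA ring */

	/* Throttle the stack *after* queueing, once the ring is nearly full. */
	if (my_ring_space(priv) < priv->high_mark)
		netif_stop_queue(dev);

	return NETDEV_TX_OK;
}

static void my_tx_complete(struct my_priv *priv)
{
	my_ring_reclaim(priv);			/* free completed descriptors */

	/* Re-open the queue once there is room again. */
	if (netif_queue_stopped(priv->dev) &&
	    my_ring_space(priv) >= priv->high_mark)
		netif_wake_queue(priv->dev);
}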
@@ -9890,13 +9906,13 @@ static int ipw_net_set_mac_address(struct net_device *dev, void *p)
9890 struct sockaddr *addr = p; 9906 struct sockaddr *addr = p;
9891 if (!is_valid_ether_addr(addr->sa_data)) 9907 if (!is_valid_ether_addr(addr->sa_data))
9892 return -EADDRNOTAVAIL; 9908 return -EADDRNOTAVAIL;
9893 down(&priv->sem); 9909 mutex_lock(&priv->mutex);
9894 priv->config |= CFG_CUSTOM_MAC; 9910 priv->config |= CFG_CUSTOM_MAC;
9895 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); 9911 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
9896 printk(KERN_INFO "%s: Setting MAC to " MAC_FMT "\n", 9912 printk(KERN_INFO "%s: Setting MAC to " MAC_FMT "\n",
9897 priv->net_dev->name, MAC_ARG(priv->mac_addr)); 9913 priv->net_dev->name, MAC_ARG(priv->mac_addr));
9898 queue_work(priv->workqueue, &priv->adapter_restart); 9914 queue_work(priv->workqueue, &priv->adapter_restart);
9899 up(&priv->sem); 9915 mutex_unlock(&priv->mutex);
9900 return 0; 9916 return 0;
9901} 9917}
9902 9918
@@ -9940,9 +9956,9 @@ static int ipw_ethtool_get_eeprom(struct net_device *dev,
9940 9956
9941 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE) 9957 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
9942 return -EINVAL; 9958 return -EINVAL;
9943 down(&p->sem); 9959 mutex_lock(&p->mutex);
9944 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len); 9960 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
9945 up(&p->sem); 9961 mutex_unlock(&p->mutex);
9946 return 0; 9962 return 0;
9947} 9963}
9948 9964
@@ -9954,12 +9970,11 @@ static int ipw_ethtool_set_eeprom(struct net_device *dev,
9954 9970
9955 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE) 9971 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
9956 return -EINVAL; 9972 return -EINVAL;
9957 down(&p->sem); 9973 mutex_lock(&p->mutex);
9958 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len); 9974 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
9959 for (i = IPW_EEPROM_DATA; 9975 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
9960 i < IPW_EEPROM_DATA + IPW_EEPROM_IMAGE_SIZE; i++) 9976 ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
9961 ipw_write8(p, i, p->eeprom[i]); 9977 mutex_unlock(&p->mutex);
9962 up(&p->sem);
9963 return 0; 9978 return 0;
9964} 9979}
9965 9980
@@ -10054,12 +10069,12 @@ static void ipw_rf_kill(void *adapter)
10054static void ipw_bg_rf_kill(void *data) 10069static void ipw_bg_rf_kill(void *data)
10055{ 10070{
10056 struct ipw_priv *priv = data; 10071 struct ipw_priv *priv = data;
10057 down(&priv->sem); 10072 mutex_lock(&priv->mutex);
10058 ipw_rf_kill(data); 10073 ipw_rf_kill(data);
10059 up(&priv->sem); 10074 mutex_unlock(&priv->mutex);
10060} 10075}
10061 10076
10062void ipw_link_up(struct ipw_priv *priv) 10077static void ipw_link_up(struct ipw_priv *priv)
10063{ 10078{
10064 priv->last_seq_num = -1; 10079 priv->last_seq_num = -1;
10065 priv->last_frag_num = -1; 10080 priv->last_frag_num = -1;
@@ -10089,12 +10104,12 @@ void ipw_link_up(struct ipw_priv *priv)
10089static void ipw_bg_link_up(void *data) 10104static void ipw_bg_link_up(void *data)
10090{ 10105{
10091 struct ipw_priv *priv = data; 10106 struct ipw_priv *priv = data;
10092 down(&priv->sem); 10107 mutex_lock(&priv->mutex);
10093 ipw_link_up(data); 10108 ipw_link_up(data);
10094 up(&priv->sem); 10109 mutex_unlock(&priv->mutex);
10095} 10110}
10096 10111
10097void ipw_link_down(struct ipw_priv *priv) 10112static void ipw_link_down(struct ipw_priv *priv)
10098{ 10113{
10099 ipw_led_link_down(priv); 10114 ipw_led_link_down(priv);
10100 netif_carrier_off(priv->net_dev); 10115 netif_carrier_off(priv->net_dev);
@@ -10117,9 +10132,9 @@ void ipw_link_down(struct ipw_priv *priv)
10117static void ipw_bg_link_down(void *data) 10132static void ipw_bg_link_down(void *data)
10118{ 10133{
10119 struct ipw_priv *priv = data; 10134 struct ipw_priv *priv = data;
10120 down(&priv->sem); 10135 mutex_lock(&priv->mutex);
10121 ipw_link_down(data); 10136 ipw_link_down(data);
10122 up(&priv->sem); 10137 mutex_unlock(&priv->mutex);
10123} 10138}
10124 10139
10125static int ipw_setup_deferred_work(struct ipw_priv *priv) 10140static int ipw_setup_deferred_work(struct ipw_priv *priv)
@@ -10292,6 +10307,20 @@ static int ipw_config(struct ipw_priv *priv)
10292 10307
10293 /* set basic system config settings */ 10308 /* set basic system config settings */
10294 init_sys_config(&priv->sys_config); 10309 init_sys_config(&priv->sys_config);
10310
10311 /* Support Bluetooth if we have BT h/w on board, and user wants to.
10312 * Does not support BT priority yet (don't abort or defer our Tx) */
10313 if (bt_coexist) {
10314 unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
10315
10316 if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
10317 priv->sys_config.bt_coexistence
10318 |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
10319 if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
10320 priv->sys_config.bt_coexistence
10321 |= CFG_BT_COEXISTENCE_OOB;
10322 }
10323
10295 if (priv->ieee->iw_mode == IW_MODE_ADHOC) 10324 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10296 priv->sys_config.answer_broadcast_ssid_probe = 1; 10325 priv->sys_config.answer_broadcast_ssid_probe = 1;
10297 else 10326 else
@@ -10349,6 +10378,9 @@ static int ipw_config(struct ipw_priv *priv)
10349 * not intended for resale of the above mentioned Intel adapters has 10378 * not intended for resale of the above mentioned Intel adapters has
10350 * not been tested. 10379 * not been tested.
10351 * 10380 *
10381 * Remember to update the table in README.ipw2200 when changing this
10382 * table.
10383 *
10352 */ 10384 */
10353static const struct ieee80211_geo ipw_geos[] = { 10385static const struct ieee80211_geo ipw_geos[] = {
10354 { /* Restricted */ 10386 { /* Restricted */
@@ -10596,96 +10628,6 @@ static const struct ieee80211_geo ipw_geos[] = {
10596 } 10628 }
10597}; 10629};
10598 10630
10599/* GEO code borrowed from ieee80211_geo.c */
10600static int ipw_is_valid_channel(struct ieee80211_device *ieee, u8 channel)
10601{
10602 int i;
10603
10604 /* Driver needs to initialize the geography map before using
10605 * these helper functions */
10606 BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0);
10607
10608 if (ieee->freq_band & IEEE80211_24GHZ_BAND)
10609 for (i = 0; i < ieee->geo.bg_channels; i++)
10610 /* NOTE: If G mode is currently supported but
10611 * this is a B only channel, we don't see it
10612 * as valid. */
10613 if ((ieee->geo.bg[i].channel == channel) &&
10614 (!(ieee->mode & IEEE_G) ||
10615 !(ieee->geo.bg[i].flags & IEEE80211_CH_B_ONLY)))
10616 return IEEE80211_24GHZ_BAND;
10617
10618 if (ieee->freq_band & IEEE80211_52GHZ_BAND)
10619 for (i = 0; i < ieee->geo.a_channels; i++)
10620 if (ieee->geo.a[i].channel == channel)
10621 return IEEE80211_52GHZ_BAND;
10622
10623 return 0;
10624}
10625
10626static int ipw_channel_to_index(struct ieee80211_device *ieee, u8 channel)
10627{
10628 int i;
10629
10630 /* Driver needs to initialize the geography map before using
10631 * these helper functions */
10632 BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0);
10633
10634 if (ieee->freq_band & IEEE80211_24GHZ_BAND)
10635 for (i = 0; i < ieee->geo.bg_channels; i++)
10636 if (ieee->geo.bg[i].channel == channel)
10637 return i;
10638
10639 if (ieee->freq_band & IEEE80211_52GHZ_BAND)
10640 for (i = 0; i < ieee->geo.a_channels; i++)
10641 if (ieee->geo.a[i].channel == channel)
10642 return i;
10643
10644 return -1;
10645}
10646
10647static u8 ipw_freq_to_channel(struct ieee80211_device *ieee, u32 freq)
10648{
10649 int i;
10650
10651 /* Driver needs to initialize the geography map before using
10652 * these helper functions */
10653 BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0);
10654
10655 freq /= 100000;
10656
10657 if (ieee->freq_band & IEEE80211_24GHZ_BAND)
10658 for (i = 0; i < ieee->geo.bg_channels; i++)
10659 if (ieee->geo.bg[i].freq == freq)
10660 return ieee->geo.bg[i].channel;
10661
10662 if (ieee->freq_band & IEEE80211_52GHZ_BAND)
10663 for (i = 0; i < ieee->geo.a_channels; i++)
10664 if (ieee->geo.a[i].freq == freq)
10665 return ieee->geo.a[i].channel;
10666
10667 return 0;
10668}
10669
10670static int ipw_set_geo(struct ieee80211_device *ieee,
10671 const struct ieee80211_geo *geo)
10672{
10673 memcpy(ieee->geo.name, geo->name, 3);
10674 ieee->geo.name[3] = '\0';
10675 ieee->geo.bg_channels = geo->bg_channels;
10676 ieee->geo.a_channels = geo->a_channels;
10677 memcpy(ieee->geo.bg, geo->bg, geo->bg_channels *
10678 sizeof(struct ieee80211_channel));
10679 memcpy(ieee->geo.a, geo->a, ieee->geo.a_channels *
10680 sizeof(struct ieee80211_channel));
10681 return 0;
10682}
10683
10684static const struct ieee80211_geo *ipw_get_geo(struct ieee80211_device *ieee)
10685{
10686 return &ieee->geo;
10687}
10688
10689#define MAX_HW_RESTARTS 5 10631#define MAX_HW_RESTARTS 5
10690static int ipw_up(struct ipw_priv *priv) 10632static int ipw_up(struct ipw_priv *priv)
10691{ 10633{
@@ -10732,14 +10674,11 @@ static int ipw_up(struct ipw_priv *priv)
10732 priv->eeprom[EEPROM_COUNTRY_CODE + 2]); 10674 priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
10733 j = 0; 10675 j = 0;
10734 } 10676 }
10735 if (ipw_set_geo(priv->ieee, &ipw_geos[j])) { 10677 if (ieee80211_set_geo(priv->ieee, &ipw_geos[j])) {
10736 IPW_WARNING("Could not set geography."); 10678 IPW_WARNING("Could not set geography.");
10737 return 0; 10679 return 0;
10738 } 10680 }
10739 10681
10740 IPW_DEBUG_INFO("Geography %03d [%s] detected.\n",
10741 j, priv->ieee->geo.name);
10742
10743 if (priv->status & STATUS_RF_KILL_SW) { 10682 if (priv->status & STATUS_RF_KILL_SW) {
10744 IPW_WARNING("Radio disabled by module parameter.\n"); 10683 IPW_WARNING("Radio disabled by module parameter.\n");
10745 return 0; 10684 return 0;
@@ -10782,9 +10721,9 @@ static int ipw_up(struct ipw_priv *priv)
10782static void ipw_bg_up(void *data) 10721static void ipw_bg_up(void *data)
10783{ 10722{
10784 struct ipw_priv *priv = data; 10723 struct ipw_priv *priv = data;
10785 down(&priv->sem); 10724 mutex_lock(&priv->mutex);
10786 ipw_up(data); 10725 ipw_up(data);
10787 up(&priv->sem); 10726 mutex_unlock(&priv->mutex);
10788} 10727}
10789 10728
10790static void ipw_deinit(struct ipw_priv *priv) 10729static void ipw_deinit(struct ipw_priv *priv)
@@ -10853,23 +10792,23 @@ static void ipw_down(struct ipw_priv *priv)
10853static void ipw_bg_down(void *data) 10792static void ipw_bg_down(void *data)
10854{ 10793{
10855 struct ipw_priv *priv = data; 10794 struct ipw_priv *priv = data;
10856 down(&priv->sem); 10795 mutex_lock(&priv->mutex);
10857 ipw_down(data); 10796 ipw_down(data);
10858 up(&priv->sem); 10797 mutex_unlock(&priv->mutex);
10859} 10798}
10860 10799
10861/* Called by register_netdev() */ 10800/* Called by register_netdev() */
10862static int ipw_net_init(struct net_device *dev) 10801static int ipw_net_init(struct net_device *dev)
10863{ 10802{
10864 struct ipw_priv *priv = ieee80211_priv(dev); 10803 struct ipw_priv *priv = ieee80211_priv(dev);
10865 down(&priv->sem); 10804 mutex_lock(&priv->mutex);
10866 10805
10867 if (ipw_up(priv)) { 10806 if (ipw_up(priv)) {
10868 up(&priv->sem); 10807 mutex_unlock(&priv->mutex);
10869 return -EIO; 10808 return -EIO;
10870 } 10809 }
10871 10810
10872 up(&priv->sem); 10811 mutex_unlock(&priv->mutex);
10873 return 0; 10812 return 0;
10874} 10813}
10875 10814
@@ -10959,7 +10898,7 @@ static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10959 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) 10898 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
10960 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]); 10899 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
10961 10900
10962 init_MUTEX(&priv->sem); 10901 mutex_init(&priv->mutex);
10963 if (pci_enable_device(pdev)) { 10902 if (pci_enable_device(pdev)) {
10964 err = -ENODEV; 10903 err = -ENODEV;
10965 goto out_free_ieee80211; 10904 goto out_free_ieee80211;
@@ -11017,7 +10956,7 @@ static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
11017 SET_MODULE_OWNER(net_dev); 10956 SET_MODULE_OWNER(net_dev);
11018 SET_NETDEV_DEV(net_dev, &pdev->dev); 10957 SET_NETDEV_DEV(net_dev, &pdev->dev);
11019 10958
11020 down(&priv->sem); 10959 mutex_lock(&priv->mutex);
11021 10960
11022 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit; 10961 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
11023 priv->ieee->set_security = shim__set_security; 10962 priv->ieee->set_security = shim__set_security;
@@ -11050,16 +10989,22 @@ static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
11050 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group); 10989 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11051 if (err) { 10990 if (err) {
11052 IPW_ERROR("failed to create sysfs device attributes\n"); 10991 IPW_ERROR("failed to create sysfs device attributes\n");
11053 up(&priv->sem); 10992 mutex_unlock(&priv->mutex);
11054 goto out_release_irq; 10993 goto out_release_irq;
11055 } 10994 }
11056 10995
11057 up(&priv->sem); 10996 mutex_unlock(&priv->mutex);
11058 err = register_netdev(net_dev); 10997 err = register_netdev(net_dev);
11059 if (err) { 10998 if (err) {
11060 IPW_ERROR("failed to register network device\n"); 10999 IPW_ERROR("failed to register network device\n");
11061 goto out_remove_sysfs; 11000 goto out_remove_sysfs;
11062 } 11001 }
11002
11003 printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
11004 "channels, %d 802.11a channels)\n",
11005 priv->ieee->geo.name, priv->ieee->geo.bg_channels,
11006 priv->ieee->geo.a_channels);
11007
11063 return 0; 11008 return 0;
11064 11009
11065 out_remove_sysfs: 11010 out_remove_sysfs:
@@ -11091,13 +11036,13 @@ static void ipw_pci_remove(struct pci_dev *pdev)
11091 if (!priv) 11036 if (!priv)
11092 return; 11037 return;
11093 11038
11094 down(&priv->sem); 11039 mutex_lock(&priv->mutex);
11095 11040
11096 priv->status |= STATUS_EXIT_PENDING; 11041 priv->status |= STATUS_EXIT_PENDING;
11097 ipw_down(priv); 11042 ipw_down(priv);
11098 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group); 11043 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11099 11044
11100 up(&priv->sem); 11045 mutex_unlock(&priv->mutex);
11101 11046
11102 unregister_netdev(priv->net_dev); 11047 unregister_netdev(priv->net_dev);
11103 11048
@@ -11250,8 +11195,10 @@ MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
11250module_param(led, int, 0444); 11195module_param(led, int, 0444);
11251MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)\n"); 11196MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)\n");
11252 11197
11198#ifdef CONFIG_IPW2200_DEBUG
11253module_param(debug, int, 0444); 11199module_param(debug, int, 0444);
11254MODULE_PARM_DESC(debug, "debug output mask"); 11200MODULE_PARM_DESC(debug, "debug output mask");
11201#endif
11255 11202
11256module_param(channel, int, 0444); 11203module_param(channel, int, 0444);
11257MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])"); 11204MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
@@ -11281,12 +11228,18 @@ module_param(mode, int, 0444);
11281MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)"); 11228MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
11282#endif 11229#endif
11283 11230
11231module_param(bt_coexist, int, 0444);
11232MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");
11233
11284module_param(hwcrypto, int, 0444); 11234module_param(hwcrypto, int, 0444);
11285MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default on)"); 11235MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
11286 11236
11287module_param(cmdlog, int, 0444); 11237module_param(cmdlog, int, 0444);
11288MODULE_PARM_DESC(cmdlog, 11238MODULE_PARM_DESC(cmdlog,
11289 "allocate a ring buffer for logging firmware commands"); 11239 "allocate a ring buffer for logging firmware commands");
11290 11240
11241module_param(roaming, int, 0444);
11242MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
11243
11291module_exit(ipw_exit); 11244module_exit(ipw_exit);
11292module_init(ipw_init); 11245module_init(ipw_init);
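/*
 * The ipw2200.c changes above replace the driver-wide counting semaphore
 * (down()/up() on priv->sem) with the struct mutex API added in 2.6.16
 * (mutex_lock()/mutex_unlock() on priv->mutex).  A mutex is a strictly
 * owner-released sleeping lock and gains the CONFIG_DEBUG_MUTEXES checks
 * that a semaphore used as a mutex cannot offer.  A minimal sketch of the
 * pattern, using a hypothetical my_dev rather than struct ipw_priv:
 */
#include <linux/mutex.h>

struct my_dev {
	struct mutex mutex;		/* serializes the configuration paths */
	int config;
};

static void my_dev_init(struct my_dev *dev)
{
	mutex_init(&dev->mutex);	/* once, before the lock is first taken */
}

static void my_dev_set_config(struct my_dev *dev, int value)
{
	mutex_lock(&dev->mutex);	/* may sleep: process context only */
	dev->config = value;
	mutex_unlock(&dev->mutex);	/* must be released by the same owner */
}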
diff --git a/drivers/net/wireless/ipw2200.h b/drivers/net/wireless/ipw2200.h
index e65620a4d79e..4b9804900702 100644
--- a/drivers/net/wireless/ipw2200.h
+++ b/drivers/net/wireless/ipw2200.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 2
3 Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved. 3 Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
4 4
5 This program is free software; you can redistribute it and/or modify it 5 This program is free software; you can redistribute it and/or modify it
6 under the terms of version 2 of the GNU General Public License as 6 under the terms of version 2 of the GNU General Public License as
@@ -33,6 +33,7 @@
33#include <linux/moduleparam.h> 33#include <linux/moduleparam.h>
34#include <linux/config.h> 34#include <linux/config.h>
35#include <linux/init.h> 35#include <linux/init.h>
36#include <linux/mutex.h>
36 37
37#include <linux/pci.h> 38#include <linux/pci.h>
38#include <linux/netdevice.h> 39#include <linux/netdevice.h>
@@ -46,6 +47,7 @@
46#include <linux/firmware.h> 47#include <linux/firmware.h>
47#include <linux/wireless.h> 48#include <linux/wireless.h>
48#include <linux/dma-mapping.h> 49#include <linux/dma-mapping.h>
50#include <linux/jiffies.h>
49#include <asm/io.h> 51#include <asm/io.h>
50 52
51#include <net/ieee80211.h> 53#include <net/ieee80211.h>
@@ -244,8 +246,10 @@ enum connection_manager_assoc_states {
244#define HOST_NOTIFICATION_S36_MEASUREMENT_REFUSED 31 246#define HOST_NOTIFICATION_S36_MEASUREMENT_REFUSED 31
245 247
246#define HOST_NOTIFICATION_STATUS_BEACON_MISSING 1 248#define HOST_NOTIFICATION_STATUS_BEACON_MISSING 1
247#define IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT 24 249#define IPW_MB_ROAMING_THRESHOLD_MIN 1
248#define IPW_MB_ROAMING_THRESHOLD_DEFAULT 8 250#define IPW_MB_ROAMING_THRESHOLD_DEFAULT 8
251#define IPW_MB_ROAMING_THRESHOLD_MAX 30
252#define IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT 3*IPW_MB_ROAMING_THRESHOLD_DEFAULT
249#define IPW_REAL_RATE_RX_PACKET_THRESHOLD 300 253#define IPW_REAL_RATE_RX_PACKET_THRESHOLD 300
250 254
251#define MACADRR_BYTE_LEN 6 255#define MACADRR_BYTE_LEN 6
@@ -616,13 +620,16 @@ struct notif_tgi_tx_key {
616 u8 reserved; 620 u8 reserved;
617} __attribute__ ((packed)); 621} __attribute__ ((packed));
618 622
623#define SILENCE_OVER_THRESH (1)
624#define SILENCE_UNDER_THRESH (2)
625
619struct notif_link_deterioration { 626struct notif_link_deterioration {
620 struct ipw_cmd_stats stats; 627 struct ipw_cmd_stats stats;
621 u8 rate; 628 u8 rate;
622 u8 modulation; 629 u8 modulation;
623 struct rate_histogram histogram; 630 struct rate_histogram histogram;
624 u8 reserved1; 631 u8 silence_notification_type; /* SILENCE_OVER/UNDER_THRESH */
625 u16 reserved2; 632 u16 silence_count;
626} __attribute__ ((packed)); 633} __attribute__ ((packed));
627 634
628struct notif_association { 635struct notif_association {
@@ -780,7 +787,7 @@ struct ipw_sys_config {
780 u8 enable_cts_to_self; 787 u8 enable_cts_to_self;
781 u8 enable_multicast_filtering; 788 u8 enable_multicast_filtering;
782 u8 bt_coexist_collision_thr; 789 u8 bt_coexist_collision_thr;
783 u8 reserved2; 790 u8 silence_threshold;
784 u8 accept_all_mgmt_bcpr; 791 u8 accept_all_mgmt_bcpr;
785 u8 accept_all_mgtm_frames; 792 u8 accept_all_mgtm_frames;
786 u8 pass_noise_stats_to_host; 793 u8 pass_noise_stats_to_host;
@@ -852,7 +859,7 @@ struct ipw_scan_request_ext {
852 u16 dwell_time[IPW_SCAN_TYPES]; 859 u16 dwell_time[IPW_SCAN_TYPES];
853} __attribute__ ((packed)); 860} __attribute__ ((packed));
854 861
855extern inline u8 ipw_get_scan_type(struct ipw_scan_request_ext *scan, u8 index) 862static inline u8 ipw_get_scan_type(struct ipw_scan_request_ext *scan, u8 index)
856{ 863{
857 if (index % 2) 864 if (index % 2)
858 return scan->scan_type[index / 2] & 0x0F; 865 return scan->scan_type[index / 2] & 0x0F;
@@ -860,7 +867,7 @@ extern inline u8 ipw_get_scan_type(struct ipw_scan_request_ext *scan, u8 index)
860 return (scan->scan_type[index / 2] & 0xF0) >> 4; 867 return (scan->scan_type[index / 2] & 0xF0) >> 4;
861} 868}
862 869
863extern inline void ipw_set_scan_type(struct ipw_scan_request_ext *scan, 870static inline void ipw_set_scan_type(struct ipw_scan_request_ext *scan,
864 u8 index, u8 scan_type) 871 u8 index, u8 scan_type)
865{ 872{
866 if (index % 2) 873 if (index % 2)
@@ -1120,7 +1127,7 @@ struct ipw_priv {
1120 struct ieee80211_device *ieee; 1127 struct ieee80211_device *ieee;
1121 1128
1122 spinlock_t lock; 1129 spinlock_t lock;
1123 struct semaphore sem; 1130 struct mutex mutex;
1124 1131
1125 /* basic pci-network driver stuff */ 1132 /* basic pci-network driver stuff */
1126 struct pci_dev *pci_dev; 1133 struct pci_dev *pci_dev;
@@ -1406,13 +1413,6 @@ do { if (ipw_debug_level & (level)) \
1406* Register bit definitions 1413* Register bit definitions
1407*/ 1414*/
1408 1415
1409/* Dino control registers bits */
1410
1411#define DINO_ENABLE_SYSTEM 0x80
1412#define DINO_ENABLE_CS 0x40
1413#define DINO_RXFIFO_DATA 0x01
1414#define DINO_CONTROL_REG 0x00200000
1415
1416#define IPW_INTA_RW 0x00000008 1416#define IPW_INTA_RW 0x00000008
1417#define IPW_INTA_MASK_R 0x0000000C 1417#define IPW_INTA_MASK_R 0x0000000C
1418#define IPW_INDIRECT_ADDR 0x00000010 1418#define IPW_INDIRECT_ADDR 0x00000010
@@ -1459,6 +1459,11 @@ do { if (ipw_debug_level & (level)) \
1459#define IPW_DOMAIN_0_END 0x1000 1459#define IPW_DOMAIN_0_END 0x1000
1460#define CLX_MEM_BAR_SIZE 0x1000 1460#define CLX_MEM_BAR_SIZE 0x1000
1461 1461
1462/* Dino/baseband control registers bits */
1463
1464#define DINO_ENABLE_SYSTEM 0x80 /* 1 = baseband processor on, 0 = reset */
1465#define DINO_ENABLE_CS 0x40 /* 1 = enable ucode load */
1466#define DINO_RXFIFO_DATA 0x01 /* 1 = data available */
1462#define IPW_BASEBAND_CONTROL_STATUS 0X00200000 1467#define IPW_BASEBAND_CONTROL_STATUS 0X00200000
1463#define IPW_BASEBAND_TX_FIFO_WRITE 0X00200004 1468#define IPW_BASEBAND_TX_FIFO_WRITE 0X00200004
1464#define IPW_BASEBAND_RX_FIFO_READ 0X00200004 1469#define IPW_BASEBAND_RX_FIFO_READ 0X00200004
@@ -1567,13 +1572,18 @@ do { if (ipw_debug_level & (level)) \
1567#define EEPROM_BSS_CHANNELS_BG (GET_EEPROM_ADDR(0x2c,LSB)) /* 2 bytes */ 1572#define EEPROM_BSS_CHANNELS_BG (GET_EEPROM_ADDR(0x2c,LSB)) /* 2 bytes */
1568#define EEPROM_HW_VERSION (GET_EEPROM_ADDR(0x72,LSB)) /* 2 bytes */ 1573#define EEPROM_HW_VERSION (GET_EEPROM_ADDR(0x72,LSB)) /* 2 bytes */
1569 1574
1570/* NIC type as found in the one byte EEPROM_NIC_TYPE offset*/ 1575/* NIC type as found in the one byte EEPROM_NIC_TYPE offset */
1571#define EEPROM_NIC_TYPE_0 0 1576#define EEPROM_NIC_TYPE_0 0
1572#define EEPROM_NIC_TYPE_1 1 1577#define EEPROM_NIC_TYPE_1 1
1573#define EEPROM_NIC_TYPE_2 2 1578#define EEPROM_NIC_TYPE_2 2
1574#define EEPROM_NIC_TYPE_3 3 1579#define EEPROM_NIC_TYPE_3 3
1575#define EEPROM_NIC_TYPE_4 4 1580#define EEPROM_NIC_TYPE_4 4
1576 1581
1582/* Bluetooth Coexistence capabilities as found in EEPROM_SKU_CAPABILITY */
1583#define EEPROM_SKU_CAP_BT_CHANNEL_SIG 0x01 /* we can tell BT our channel # */
1584#define EEPROM_SKU_CAP_BT_PRIORITY 0x02 /* BT can take priority over us */
1585#define EEPROM_SKU_CAP_BT_OOB 0x04 /* we can signal BT out-of-band */
1586
1577#define FW_MEM_REG_LOWER_BOUND 0x00300000 1587#define FW_MEM_REG_LOWER_BOUND 0x00300000
1578#define FW_MEM_REG_EEPROM_ACCESS (FW_MEM_REG_LOWER_BOUND + 0x40) 1588#define FW_MEM_REG_EEPROM_ACCESS (FW_MEM_REG_LOWER_BOUND + 0x40)
1579#define IPW_EVENT_REG (FW_MEM_REG_LOWER_BOUND + 0x04) 1589#define IPW_EVENT_REG (FW_MEM_REG_LOWER_BOUND + 0x04)
@@ -1658,9 +1668,10 @@ enum {
1658 IPW_FW_ERROR_FATAL_ERROR 1668 IPW_FW_ERROR_FATAL_ERROR
1659}; 1669};
1660 1670
1661#define AUTH_OPEN 0 1671#define AUTH_OPEN 0
1662#define AUTH_SHARED_KEY 1 1672#define AUTH_SHARED_KEY 1
1663#define AUTH_IGNORE 3 1673#define AUTH_LEAP 2
1674#define AUTH_IGNORE 3
1664 1675
1665#define HC_ASSOCIATE 0 1676#define HC_ASSOCIATE 0
1666#define HC_REASSOCIATE 1 1677#define HC_REASSOCIATE 1
@@ -1860,7 +1871,7 @@ struct host_cmd {
1860 u8 cmd; 1871 u8 cmd;
1861 u8 len; 1872 u8 len;
1862 u16 reserved; 1873 u16 reserved;
1863 u32 param[TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH]; 1874 u32 *param;
1864} __attribute__ ((packed)); 1875} __attribute__ ((packed));
1865 1876
1866struct ipw_cmd_log { 1877struct ipw_cmd_log {
@@ -1869,21 +1880,24 @@ struct ipw_cmd_log {
1869 struct host_cmd cmd; 1880 struct host_cmd cmd;
1870}; 1881};
1871 1882
1872#define CFG_BT_COEXISTENCE_MIN 0x00 1883/* SysConfig command parameters ... */
1873#define CFG_BT_COEXISTENCE_DEFER 0x02 1884/* bt_coexistence param */
1874#define CFG_BT_COEXISTENCE_KILL 0x04 1885#define CFG_BT_COEXISTENCE_SIGNAL_CHNL 0x01 /* tell BT our chnl # */
1875#define CFG_BT_COEXISTENCE_WME_OVER_BT 0x08 1886#define CFG_BT_COEXISTENCE_DEFER 0x02 /* defer our Tx if BT traffic */
1876#define CFG_BT_COEXISTENCE_OOB 0x10 1887#define CFG_BT_COEXISTENCE_KILL 0x04 /* kill our Tx if BT traffic */
1877#define CFG_BT_COEXISTENCE_MAX 0xFF 1888#define CFG_BT_COEXISTENCE_WME_OVER_BT 0x08 /* multimedia extensions */
1878#define CFG_BT_COEXISTENCE_DEF 0x80 /* read Bt from EEPROM */ 1889#define CFG_BT_COEXISTENCE_OOB 0x10 /* signal BT via out-of-band */
1879 1890
1880#define CFG_CTS_TO_ITSELF_ENABLED_MIN 0x0 1891/* clear-to-send to self param */
1881#define CFG_CTS_TO_ITSELF_ENABLED_MAX 0x1 1892#define CFG_CTS_TO_ITSELF_ENABLED_MIN 0x00
1893#define CFG_CTS_TO_ITSELF_ENABLED_MAX 0x01
1882#define CFG_CTS_TO_ITSELF_ENABLED_DEF CFG_CTS_TO_ITSELF_ENABLED_MIN 1894#define CFG_CTS_TO_ITSELF_ENABLED_DEF CFG_CTS_TO_ITSELF_ENABLED_MIN
1883 1895
1884#define CFG_SYS_ANTENNA_BOTH 0x000 1896/* Antenna diversity param (h/w can select best antenna, based on signal) */
1885#define CFG_SYS_ANTENNA_A 0x001 1897#define CFG_SYS_ANTENNA_BOTH 0x00 /* NIC selects best antenna */
1886#define CFG_SYS_ANTENNA_B 0x003 1898#define CFG_SYS_ANTENNA_A 0x01 /* force antenna A */
1899#define CFG_SYS_ANTENNA_B 0x03 /* force antenna B */
1900#define CFG_SYS_ANTENNA_SLOW_DIV 0x02 /* consider background noise */
1887 1901
1888/* 1902/*
1889 * The definitions below were lifted off the ipw2100 driver, which only 1903 * The definitions below were lifted off the ipw2100 driver, which only
@@ -1899,27 +1913,4 @@ struct ipw_cmd_log {
1899 1913
1900#define IPW_MAX_CONFIG_RETRIES 10 1914#define IPW_MAX_CONFIG_RETRIES 10
1901 1915
1902static inline u32 frame_hdr_len(struct ieee80211_hdr_4addr *hdr)
1903{
1904 u32 retval;
1905 u16 fc;
1906
1907 retval = sizeof(struct ieee80211_hdr_3addr);
1908 fc = le16_to_cpu(hdr->frame_ctl);
1909
1910 /*
1911 * Function ToDS FromDS
1912 * IBSS 0 0
1913 * To AP 1 0
1914 * From AP 0 1
1915 * WDS (bridge) 1 1
1916 *
1917 * Only WDS frames use Address4 among them. --YZ
1918 */
1919 if (!(fc & IEEE80211_FCTL_TODS) || !(fc & IEEE80211_FCTL_FROMDS))
1920 retval -= ETH_ALEN;
1921
1922 return retval;
1923}
1924
1925#endif /* __ipw2200_h__ */ 1916#endif /* __ipw2200_h__ */
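/*
 * One detail of the ipw2200.h hunks above: ipw_get_scan_type() and
 * ipw_set_scan_type() change from "extern inline" to "static inline".
 * Under GCC's traditional inline semantics an "extern inline" body in a
 * header is only an inlining hint, so any translation unit where the call
 * is not inlined ends up referencing a symbol that is never emitted.
 * "static inline" lets each includer fall back to its own private
 * out-of-line copy.  A hedged sketch of the preferred form, using a
 * hypothetical helper rather than the real scan-type accessors:
 */
#ifndef __MY_DRIVER_H__
#define __MY_DRIVER_H__

#include <linux/types.h>

/* Safe in a shared header: no link-time clashes, no missing symbols. */
static inline u8 my_low_nibble(u8 byte)
{
	return byte & 0x0F;
}

#endif /* __MY_DRIVER_H__ */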
diff --git a/drivers/net/wireless/netwave_cs.c b/drivers/net/wireless/netwave_cs.c
index bf6271ee387a..75ce6ddb0cf5 100644
--- a/drivers/net/wireless/netwave_cs.c
+++ b/drivers/net/wireless/netwave_cs.c
@@ -55,10 +55,8 @@
55#include <linux/etherdevice.h> 55#include <linux/etherdevice.h>
56#include <linux/skbuff.h> 56#include <linux/skbuff.h>
57#include <linux/bitops.h> 57#include <linux/bitops.h>
58#ifdef CONFIG_NET_RADIO
59#include <linux/wireless.h> 58#include <linux/wireless.h>
60#include <net/iw_handler.h> 59#include <net/iw_handler.h>
61#endif
62 60
63#include <pcmcia/cs_types.h> 61#include <pcmcia/cs_types.h>
64#include <pcmcia/cs.h> 62#include <pcmcia/cs.h>
diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c
index 18baacfc5a2c..18a44580b53b 100644
--- a/drivers/net/wireless/strip.c
+++ b/drivers/net/wireless/strip.c
@@ -112,7 +112,7 @@ static const char StripVersion[] = "1.3A-STUART.CHESHIRE";
112#include <linux/ip.h> 112#include <linux/ip.h>
113#include <linux/tcp.h> 113#include <linux/tcp.h>
114#include <linux/time.h> 114#include <linux/time.h>
115 115#include <linux/jiffies.h>
116 116
117/************************************************************************/ 117/************************************************************************/
118/* Useful structures and definitions */ 118/* Useful structures and definitions */
@@ -1569,7 +1569,7 @@ static int strip_xmit(struct sk_buff *skb, struct net_device *dev)
1569 del_timer(&strip_info->idle_timer); 1569 del_timer(&strip_info->idle_timer);
1570 1570
1571 1571
1572 if (jiffies - strip_info->pps_timer > HZ) { 1572 if (time_after(jiffies, strip_info->pps_timer + HZ)) {
1573 unsigned long t = jiffies - strip_info->pps_timer; 1573 unsigned long t = jiffies - strip_info->pps_timer;
1574 unsigned long rx_pps_count = (strip_info->rx_pps_count * HZ * 8 + t / 2) / t; 1574 unsigned long rx_pps_count = (strip_info->rx_pps_count * HZ * 8 + t / 2) / t;
1575 unsigned long tx_pps_count = (strip_info->tx_pps_count * HZ * 8 + t / 2) / t; 1575 unsigned long tx_pps_count = (strip_info->tx_pps_count * HZ * 8 + t / 2) / t;
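/*
 * strip.c above (and zorro8390.c further down) convert raw jiffies
 * arithmetic into the time_after() helper from <linux/jiffies.h>.
 * time_after(a, b) compares through a signed difference, so the test
 * stays correct when the jiffies counter wraps, and it states the intent
 * more clearly than hand-rolled comparisons.  A small sketch of a bounded
 * poll built on it; my_wait_ready() and the ready() callback are
 * hypothetical, not taken from either driver:
 */
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>

static int my_wait_ready(int (*ready)(void))
{
	unsigned long deadline = jiffies + 2 * HZ / 100;	/* roughly 20 ms */

	while (!ready()) {
		/* Wrap-safe check that 'deadline' has passed. */
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		udelay(10);
	}
	return 0;
}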
diff --git a/drivers/net/wireless/wavelan.p.h b/drivers/net/wireless/wavelan.p.h
index 166e28b9a4f7..5cb0bc8bb128 100644
--- a/drivers/net/wireless/wavelan.p.h
+++ b/drivers/net/wireless/wavelan.p.h
@@ -98,11 +98,7 @@
98 * characteristics of the hardware. Applications such as mobile IP may 98 * characteristics of the hardware. Applications such as mobile IP may
99 * take advantage of it. 99 * take advantage of it.
100 * 100 *
101 * You will need to enable the CONFIG_NET_RADIO define in the kernel 101 * It might be a good idea as well to fetch the wireless tools to
102 * configuration to enable the wireless extensions (this is the one
103 * giving access to the radio network device choice).
104 *
105 * It might also be a good idea as well to fetch the wireless tools to
106 * configure the device and play a bit. 102 * configure the device and play a bit.
107 */ 103 */
108 104
diff --git a/drivers/net/wireless/wavelan_cs.p.h b/drivers/net/wireless/wavelan_cs.p.h
index f2d597568151..451f6271dcbc 100644
--- a/drivers/net/wireless/wavelan_cs.p.h
+++ b/drivers/net/wireless/wavelan_cs.p.h
@@ -99,11 +99,7 @@
99 * caracteristics of the hardware in a standard way and support for 99 * caracteristics of the hardware in a standard way and support for
100 * applications for taking advantage of it (like Mobile IP). 100 * applications for taking advantage of it (like Mobile IP).
101 * 101 *
102 * You will need to enable the CONFIG_NET_RADIO define in the kernel 102 * It might be a good idea as well to fetch the wireless tools to
103 * configuration to enable the wireless extensions (this is the one
104 * giving access to the radio network device choice).
105 *
106 * It might also be a good idea as well to fetch the wireless tools to
107 * configure the device and play a bit. 103 * configure the device and play a bit.
108 */ 104 */
109 105
@@ -440,11 +436,8 @@
440#include <linux/ioport.h> 436#include <linux/ioport.h>
441#include <linux/fcntl.h> 437#include <linux/fcntl.h>
442#include <linux/ethtool.h> 438#include <linux/ethtool.h>
443
444#ifdef CONFIG_NET_RADIO
445#include <linux/wireless.h> /* Wireless extensions */ 439#include <linux/wireless.h> /* Wireless extensions */
446#include <net/iw_handler.h> /* New driver API */ 440#include <net/iw_handler.h> /* New driver API */
447#endif
448 441
449/* Pcmcia headers that we need */ 442/* Pcmcia headers that we need */
450#include <pcmcia/cs_types.h> 443#include <pcmcia/cs_types.h>
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 1c2506535f7e..75d56bfef0ee 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -69,8 +69,8 @@ static int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */
69static int dma_ctrl = 0x00CAC277; /* Override when loading module! */ 69static int dma_ctrl = 0x00CAC277; /* Override when loading module! */
70static int fifo_cfg = 0x0028; 70static int fifo_cfg = 0x0028;
71#else 71#else
72static int dma_ctrl = 0x004A0263; /* Constrained by errata */ 72static const int dma_ctrl = 0x004A0263; /* Constrained by errata */
73static int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */ 73static const int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */
74#endif 74#endif
75 75
76/* Set the copy breakpoint for the copy-only-tiny-frames scheme. 76/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
@@ -266,7 +266,7 @@ struct pci_id_info {
266 int drv_flags; /* Driver use, intended as capability flags. */ 266 int drv_flags; /* Driver use, intended as capability flags. */
267}; 267};
268 268
269static struct pci_id_info pci_id_tbl[] = { 269static const struct pci_id_info pci_id_tbl[] = {
270 {"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff}, 270 {"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff},
271 PCI_IOTYPE, YELLOWFIN_SIZE, 271 PCI_IOTYPE, YELLOWFIN_SIZE,
272 FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom}, 272 FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom},
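/*
 * yellowfin.c above const-qualifies module-lifetime lookup data
 * (pci_id_tbl, plus the fixed dma_ctrl/fifo_cfg values in the
 * non-override branch), letting the compiler place it in read-only
 * storage and reject accidental writes at build time.  A tiny sketch of
 * the same idea with a hypothetical board table:
 */
struct my_board_info {
	const char *name;
	int flags;
};

static const struct my_board_info my_boards[] = {
	{ "board-a", 0x01 },
	{ "board-b", 0x02 },
};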
diff --git a/drivers/net/zorro8390.c b/drivers/net/zorro8390.c
index 8ab6e12153ba..761021603597 100644
--- a/drivers/net/zorro8390.c
+++ b/drivers/net/zorro8390.c
@@ -27,6 +27,7 @@
27#include <linux/netdevice.h> 27#include <linux/netdevice.h>
28#include <linux/etherdevice.h> 28#include <linux/etherdevice.h>
29#include <linux/zorro.h> 29#include <linux/zorro.h>
30#include <linux/jiffies.h>
30 31
31#include <asm/system.h> 32#include <asm/system.h>
32#include <asm/irq.h> 33#include <asm/irq.h>
@@ -151,7 +152,7 @@ static int __devinit zorro8390_init(struct net_device *dev,
151 z_writeb(z_readb(ioaddr + NE_RESET), ioaddr + NE_RESET); 152 z_writeb(z_readb(ioaddr + NE_RESET), ioaddr + NE_RESET);
152 153
153 while ((z_readb(ioaddr + NE_EN0_ISR) & ENISR_RESET) == 0) 154 while ((z_readb(ioaddr + NE_EN0_ISR) & ENISR_RESET) == 0)
154 if (jiffies - reset_start_time > 2*HZ/100) { 155 if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
155 printk(KERN_WARNING " not found (no reset ack).\n"); 156 printk(KERN_WARNING " not found (no reset ack).\n");
156 return -ENODEV; 157 return -ENODEV;
157 } 158 }
@@ -273,7 +274,7 @@ static void zorro8390_reset_8390(struct net_device *dev)
273 274
274 /* This check _should_not_ be necessary, omit eventually. */ 275 /* This check _should_not_ be necessary, omit eventually. */
275 while ((z_readb(NE_BASE+NE_EN0_ISR) & ENISR_RESET) == 0) 276 while ((z_readb(NE_BASE+NE_EN0_ISR) & ENISR_RESET) == 0)
276 if (jiffies - reset_start_time > 2*HZ/100) { 277 if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
277 printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", 278 printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n",
278 dev->name); 279 dev->name);
279 break; 280 break;
@@ -400,7 +401,7 @@ static void zorro8390_block_output(struct net_device *dev, int count,
400 dma_start = jiffies; 401 dma_start = jiffies;
401 402
402 while ((z_readb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0) 403 while ((z_readb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0)
403 if (jiffies - dma_start > 2*HZ/100) { /* 20ms */ 404 if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */
404 printk(KERN_ERR "%s: timeout waiting for Tx RDC.\n", 405 printk(KERN_ERR "%s: timeout waiting for Tx RDC.\n",
405 dev->name); 406 dev->name);
406 zorro8390_reset_8390(dev); 407 zorro8390_reset_8390(dev);
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 2e727f49ad19..44133250da2e 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -273,7 +273,7 @@ removeseg:
273 list_del(&dev_info->lh); 273 list_del(&dev_info->lh);
274 274
275 del_gendisk(dev_info->gd); 275 del_gendisk(dev_info->gd);
276 blk_put_queue(dev_info->dcssblk_queue); 276 blk_cleanup_queue(dev_info->dcssblk_queue);
277 dev_info->gd->queue = NULL; 277 dev_info->gd->queue = NULL;
278 put_disk(dev_info->gd); 278 put_disk(dev_info->gd);
279 device_unregister(dev); 279 device_unregister(dev);
@@ -491,7 +491,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
491unregister_dev: 491unregister_dev:
492 PRINT_ERR("device_create_file() failed!\n"); 492 PRINT_ERR("device_create_file() failed!\n");
493 list_del(&dev_info->lh); 493 list_del(&dev_info->lh);
494 blk_put_queue(dev_info->dcssblk_queue); 494 blk_cleanup_queue(dev_info->dcssblk_queue);
495 dev_info->gd->queue = NULL; 495 dev_info->gd->queue = NULL;
496 put_disk(dev_info->gd); 496 put_disk(dev_info->gd);
497 device_unregister(&dev_info->dev); 497 device_unregister(&dev_info->dev);
@@ -505,7 +505,7 @@ list_del:
505unload_seg: 505unload_seg:
506 segment_unload(local_buf); 506 segment_unload(local_buf);
507dealloc_gendisk: 507dealloc_gendisk:
508 blk_put_queue(dev_info->dcssblk_queue); 508 blk_cleanup_queue(dev_info->dcssblk_queue);
509 dev_info->gd->queue = NULL; 509 dev_info->gd->queue = NULL;
510 put_disk(dev_info->gd); 510 put_disk(dev_info->gd);
511free_dev_info: 511free_dev_info:
@@ -562,7 +562,7 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
562 list_del(&dev_info->lh); 562 list_del(&dev_info->lh);
563 563
564 del_gendisk(dev_info->gd); 564 del_gendisk(dev_info->gd);
565 blk_put_queue(dev_info->dcssblk_queue); 565 blk_cleanup_queue(dev_info->dcssblk_queue);
566 dev_info->gd->queue = NULL; 566 dev_info->gd->queue = NULL;
567 put_disk(dev_info->gd); 567 put_disk(dev_info->gd);
568 device_unregister(&dev_info->dev); 568 device_unregister(&dev_info->dev);
diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c
index 1c8b612d8234..3e156e005f2e 100644
--- a/drivers/sbus/char/bbc_i2c.c
+++ b/drivers/sbus/char/bbc_i2c.c
@@ -440,7 +440,8 @@ static int __init bbc_i2c_init(void)
440 struct linux_ebus_device *edev = NULL; 440 struct linux_ebus_device *edev = NULL;
441 int err, index = 0; 441 int err, index = 0;
442 442
443 if (tlb_type != cheetah || !bbc_present()) 443 if ((tlb_type != cheetah && tlb_type != cheetah_plus) ||
444 !bbc_present())
444 return -ENODEV; 445 return -ENODEV;
445 446
446 for_each_ebus(ebus) { 447 for_each_ebus(ebus) {
@@ -486,3 +487,4 @@ static void bbc_i2c_cleanup(void)
486 487
487module_init(bbc_i2c_init); 488module_init(bbc_i2c_init);
488module_exit(bbc_i2c_cleanup); 489module_exit(bbc_i2c_cleanup);
490MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 320e765fa0cd..15dc2e00e1b2 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -163,7 +163,7 @@ ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
163CFLAGS_ncr53c8xx.o := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m) 163CFLAGS_ncr53c8xx.o := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
164zalon7xx-objs := zalon.o ncr53c8xx.o 164zalon7xx-objs := zalon.o ncr53c8xx.o
165NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o 165NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o
166libata-objs := libata-core.o libata-scsi.o 166libata-objs := libata-core.o libata-scsi.o libata-bmdma.o
167oktagon_esp_mod-objs := oktagon_esp.o oktagon_io.o 167oktagon_esp_mod-objs := oktagon_esp.o oktagon_io.o
168 168
169# Files generated that shall be removed upon make clean 169# Files generated that shall be removed upon make clean
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index 559ff7aae3f1..e97ab3e6de4d 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -66,6 +66,9 @@ enum {
66 AHCI_IRQ_ON_SG = (1 << 31), 66 AHCI_IRQ_ON_SG = (1 << 31),
67 AHCI_CMD_ATAPI = (1 << 5), 67 AHCI_CMD_ATAPI = (1 << 5),
68 AHCI_CMD_WRITE = (1 << 6), 68 AHCI_CMD_WRITE = (1 << 6),
69 AHCI_CMD_PREFETCH = (1 << 7),
70 AHCI_CMD_RESET = (1 << 8),
71 AHCI_CMD_CLR_BUSY = (1 << 10),
69 72
70 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */ 73 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
71 74
@@ -85,6 +88,7 @@ enum {
85 88
86 /* HOST_CAP bits */ 89 /* HOST_CAP bits */
87 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */ 90 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
91 HOST_CAP_CLO = (1 << 24), /* Command List Override support */
88 92
89 /* registers for each SATA port */ 93 /* registers for each SATA port */
90 PORT_LST_ADDR = 0x00, /* command list DMA addr */ 94 PORT_LST_ADDR = 0x00, /* command list DMA addr */
@@ -138,6 +142,7 @@ enum {
138 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */ 142 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
139 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */ 143 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
140 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */ 144 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
145 PORT_CMD_CLO = (1 << 3), /* Command list override */
141 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */ 146 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
142 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */ 147 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
143 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */ 148 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
@@ -184,9 +189,9 @@ struct ahci_port_priv {
184static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg); 189static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg);
185static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); 190static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
186static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); 191static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
187static int ahci_qc_issue(struct ata_queued_cmd *qc); 192static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
188static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs); 193static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
189static void ahci_phy_reset(struct ata_port *ap); 194static int ahci_probe_reset(struct ata_port *ap, unsigned int *classes);
190static void ahci_irq_clear(struct ata_port *ap); 195static void ahci_irq_clear(struct ata_port *ap);
191static void ahci_eng_timeout(struct ata_port *ap); 196static void ahci_eng_timeout(struct ata_port *ap);
192static int ahci_port_start(struct ata_port *ap); 197static int ahci_port_start(struct ata_port *ap);
@@ -202,11 +207,11 @@ static struct scsi_host_template ahci_sht = {
202 .name = DRV_NAME, 207 .name = DRV_NAME,
203 .ioctl = ata_scsi_ioctl, 208 .ioctl = ata_scsi_ioctl,
204 .queuecommand = ata_scsi_queuecmd, 209 .queuecommand = ata_scsi_queuecmd,
210 .eh_timed_out = ata_scsi_timed_out,
205 .eh_strategy_handler = ata_scsi_error, 211 .eh_strategy_handler = ata_scsi_error,
206 .can_queue = ATA_DEF_QUEUE, 212 .can_queue = ATA_DEF_QUEUE,
207 .this_id = ATA_SHT_THIS_ID, 213 .this_id = ATA_SHT_THIS_ID,
208 .sg_tablesize = AHCI_MAX_SG, 214 .sg_tablesize = AHCI_MAX_SG,
209 .max_sectors = ATA_MAX_SECTORS,
210 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 215 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
211 .emulated = ATA_SHT_EMULATED, 216 .emulated = ATA_SHT_EMULATED,
212 .use_clustering = AHCI_USE_CLUSTERING, 217 .use_clustering = AHCI_USE_CLUSTERING,
@@ -225,7 +230,7 @@ static const struct ata_port_operations ahci_ops = {
225 230
226 .tf_read = ahci_tf_read, 231 .tf_read = ahci_tf_read,
227 232
228 .phy_reset = ahci_phy_reset, 233 .probe_reset = ahci_probe_reset,
229 234
230 .qc_prep = ahci_qc_prep, 235 .qc_prep = ahci_qc_prep,
231 .qc_issue = ahci_qc_issue, 236 .qc_issue = ahci_qc_issue,
@@ -247,8 +252,7 @@ static const struct ata_port_info ahci_port_info[] = {
247 { 252 {
248 .sht = &ahci_sht, 253 .sht = &ahci_sht,
249 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 254 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
250 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | 255 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
251 ATA_FLAG_PIO_DMA,
252 .pio_mask = 0x1f, /* pio0-4 */ 256 .pio_mask = 0x1f, /* pio0-4 */
253 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 257 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
254 .port_ops = &ahci_ops, 258 .port_ops = &ahci_ops,
@@ -450,17 +454,48 @@ static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in,
450 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4)); 454 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
451} 455}
452 456
453static void ahci_phy_reset(struct ata_port *ap) 457static int ahci_stop_engine(struct ata_port *ap)
454{ 458{
455 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr; 459 void __iomem *mmio = ap->host_set->mmio_base;
456 struct ata_taskfile tf; 460 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
457 struct ata_device *dev = &ap->device[0]; 461 int work;
458 u32 new_tmp, tmp; 462 u32 tmp;
459 463
460 __sata_phy_reset(ap); 464 tmp = readl(port_mmio + PORT_CMD);
465 tmp &= ~PORT_CMD_START;
466 writel(tmp, port_mmio + PORT_CMD);
461 467
462 if (ap->flags & ATA_FLAG_PORT_DISABLED) 468 /* wait for engine to stop. TODO: this could be
463 return; 469 * as long as 500 msec
470 */
471 work = 1000;
472 while (work-- > 0) {
473 tmp = readl(port_mmio + PORT_CMD);
474 if ((tmp & PORT_CMD_LIST_ON) == 0)
475 return 0;
476 udelay(10);
477 }
478
479 return -EIO;
480}
481
482static void ahci_start_engine(struct ata_port *ap)
483{
484 void __iomem *mmio = ap->host_set->mmio_base;
485 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
486 u32 tmp;
487
488 tmp = readl(port_mmio + PORT_CMD);
489 tmp |= PORT_CMD_START;
490 writel(tmp, port_mmio + PORT_CMD);
491 readl(port_mmio + PORT_CMD); /* flush */
492}
493
494static unsigned int ahci_dev_classify(struct ata_port *ap)
495{
496 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
497 struct ata_taskfile tf;
498 u32 tmp;
464 499
465 tmp = readl(port_mmio + PORT_SIG); 500 tmp = readl(port_mmio + PORT_SIG);
466 tf.lbah = (tmp >> 24) & 0xff; 501 tf.lbah = (tmp >> 24) & 0xff;
@@ -468,15 +503,46 @@ static void ahci_phy_reset(struct ata_port *ap)
468 tf.lbal = (tmp >> 8) & 0xff; 503 tf.lbal = (tmp >> 8) & 0xff;
469 tf.nsect = (tmp) & 0xff; 504 tf.nsect = (tmp) & 0xff;
470 505
471 dev->class = ata_dev_classify(&tf); 506 return ata_dev_classify(&tf);
472 if (!ata_dev_present(dev)) { 507}
473 ata_port_disable(ap); 508
474 return; 509static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, u32 opts)
475 } 510{
511 pp->cmd_slot[0].opts = cpu_to_le32(opts);
512 pp->cmd_slot[0].status = 0;
513 pp->cmd_slot[0].tbl_addr = cpu_to_le32(pp->cmd_tbl_dma & 0xffffffff);
514 pp->cmd_slot[0].tbl_addr_hi = cpu_to_le32((pp->cmd_tbl_dma >> 16) >> 16);
515}
516
517static int ahci_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
518{
519 int rc;
520
521 DPRINTK("ENTER\n");
522
523 ahci_stop_engine(ap);
524 rc = sata_std_hardreset(ap, verbose, class);
525 ahci_start_engine(ap);
526
527 if (rc == 0)
528 *class = ahci_dev_classify(ap);
529 if (*class == ATA_DEV_UNKNOWN)
530 *class = ATA_DEV_NONE;
531
532 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
533 return rc;
534}
535
536static void ahci_postreset(struct ata_port *ap, unsigned int *class)
537{
538 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
539 u32 new_tmp, tmp;
540
541 ata_std_postreset(ap, class);
476 542
477 /* Make sure port's ATAPI bit is set appropriately */ 543 /* Make sure port's ATAPI bit is set appropriately */
478 new_tmp = tmp = readl(port_mmio + PORT_CMD); 544 new_tmp = tmp = readl(port_mmio + PORT_CMD);
479 if (dev->class == ATA_DEV_ATAPI) 545 if (*class == ATA_DEV_ATAPI)
480 new_tmp |= PORT_CMD_ATAPI; 546 new_tmp |= PORT_CMD_ATAPI;
481 else 547 else
482 new_tmp &= ~PORT_CMD_ATAPI; 548 new_tmp &= ~PORT_CMD_ATAPI;
@@ -486,6 +552,12 @@ static void ahci_phy_reset(struct ata_port *ap)
486 } 552 }
487} 553}
488 554
555static int ahci_probe_reset(struct ata_port *ap, unsigned int *classes)
556{
557 return ata_drive_probe_reset(ap, NULL, NULL, ahci_hardreset,
558 ahci_postreset, classes);
559}
560
489static u8 ahci_check_status(struct ata_port *ap) 561static u8 ahci_check_status(struct ata_port *ap)
490{ 562{
491 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr; 563 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
@@ -533,42 +605,36 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
533{ 605{
534 struct ata_port *ap = qc->ap; 606 struct ata_port *ap = qc->ap;
535 struct ahci_port_priv *pp = ap->private_data; 607 struct ahci_port_priv *pp = ap->private_data;
608 int is_atapi = is_atapi_taskfile(&qc->tf);
536 u32 opts; 609 u32 opts;
537 const u32 cmd_fis_len = 5; /* five dwords */ 610 const u32 cmd_fis_len = 5; /* five dwords */
538 unsigned int n_elem; 611 unsigned int n_elem;
539 612
540 /* 613 /*
541 * Fill in command slot information (currently only one slot,
542 * slot 0, is currently since we don't do queueing)
543 */
544
545 opts = cmd_fis_len;
546 if (qc->tf.flags & ATA_TFLAG_WRITE)
547 opts |= AHCI_CMD_WRITE;
548 if (is_atapi_taskfile(&qc->tf))
549 opts |= AHCI_CMD_ATAPI;
550
551 pp->cmd_slot[0].opts = cpu_to_le32(opts);
552 pp->cmd_slot[0].status = 0;
553 pp->cmd_slot[0].tbl_addr = cpu_to_le32(pp->cmd_tbl_dma & 0xffffffff);
554 pp->cmd_slot[0].tbl_addr_hi = cpu_to_le32((pp->cmd_tbl_dma >> 16) >> 16);
555
556 /*
557 * Fill in command table information. First, the header, 614 * Fill in command table information. First, the header,
558 * a SATA Register - Host to Device command FIS. 615 * a SATA Register - Host to Device command FIS.
559 */ 616 */
560 ata_tf_to_fis(&qc->tf, pp->cmd_tbl, 0); 617 ata_tf_to_fis(&qc->tf, pp->cmd_tbl, 0);
561 if (opts & AHCI_CMD_ATAPI) { 618 if (is_atapi) {
562 memset(pp->cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32); 619 memset(pp->cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
563 memcpy(pp->cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, ap->cdb_len); 620 memcpy(pp->cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb,
621 qc->dev->cdb_len);
564 } 622 }
565 623
566 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) 624 n_elem = 0;
567 return; 625 if (qc->flags & ATA_QCFLAG_DMAMAP)
626 n_elem = ahci_fill_sg(qc);
568 627
569 n_elem = ahci_fill_sg(qc); 628 /*
629 * Fill in command slot information.
630 */
631 opts = cmd_fis_len | n_elem << 16;
632 if (qc->tf.flags & ATA_TFLAG_WRITE)
633 opts |= AHCI_CMD_WRITE;
634 if (is_atapi)
635 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
570 636
571 pp->cmd_slot[0].opts |= cpu_to_le32(n_elem << 16); 637 ahci_fill_cmd_slot(pp, opts);
572} 638}
573 639
574static void ahci_restart_port(struct ata_port *ap, u32 irq_stat) 640static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
@@ -576,7 +642,6 @@ static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
576 void __iomem *mmio = ap->host_set->mmio_base; 642 void __iomem *mmio = ap->host_set->mmio_base;
577 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 643 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
578 u32 tmp; 644 u32 tmp;
579 int work;
580 645
581 if ((ap->device[0].class != ATA_DEV_ATAPI) || 646 if ((ap->device[0].class != ATA_DEV_ATAPI) ||
582 ((irq_stat & PORT_IRQ_TF_ERR) == 0)) 647 ((irq_stat & PORT_IRQ_TF_ERR) == 0))
@@ -592,20 +657,7 @@ static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
592 readl(port_mmio + PORT_SCR_ERR)); 657 readl(port_mmio + PORT_SCR_ERR));
593 658
594 /* stop DMA */ 659 /* stop DMA */
595 tmp = readl(port_mmio + PORT_CMD); 660 ahci_stop_engine(ap);
596 tmp &= ~PORT_CMD_START;
597 writel(tmp, port_mmio + PORT_CMD);
598
599 /* wait for engine to stop. TODO: this could be
600 * as long as 500 msec
601 */
602 work = 1000;
603 while (work-- > 0) {
604 tmp = readl(port_mmio + PORT_CMD);
605 if ((tmp & PORT_CMD_LIST_ON) == 0)
606 break;
607 udelay(10);
608 }
609 661
610 /* clear SATA phy error, if any */ 662 /* clear SATA phy error, if any */
611 tmp = readl(port_mmio + PORT_SCR_ERR); 663 tmp = readl(port_mmio + PORT_SCR_ERR);
@@ -624,10 +676,7 @@ static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
624 } 676 }
625 677
626 /* re-start DMA */ 678 /* re-start DMA */
627 tmp = readl(port_mmio + PORT_CMD); 679 ahci_start_engine(ap);
628 tmp |= PORT_CMD_START;
629 writel(tmp, port_mmio + PORT_CMD);
630 readl(port_mmio + PORT_CMD); /* flush */
631} 680}
632 681
633static void ahci_eng_timeout(struct ata_port *ap) 682static void ahci_eng_timeout(struct ata_port *ap)
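ahci_restart_port() now calls ahci_stop_engine() and ahci_start_engine() instead of open-coding the PORT_CMD sequences. Those helpers are defined elsewhere in the patch; a minimal sketch, assuming they simply absorb the removed sequences, would be:

static void ahci_stop_engine(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap->host_set->mmio_base,
						 ap->port_no);
	u32 tmp;
	int work = 1000;

	/* clear START, then poll LIST_ON until the DMA engine stops
	 * (per the removed comment, this can take up to 500 msec) */
	tmp = readl(port_mmio + PORT_CMD);
	tmp &= ~PORT_CMD_START;
	writel(tmp, port_mmio + PORT_CMD);

	while (work-- > 0) {
		if (!(readl(port_mmio + PORT_CMD) & PORT_CMD_LIST_ON))
			break;
		udelay(10);
	}
}

static void ahci_start_engine(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap->host_set->mmio_base,
						 ap->port_no);
	u32 tmp;

	/* set START and flush the write */
	tmp = readl(port_mmio + PORT_CMD);
	writel(tmp | PORT_CMD_START, port_mmio + PORT_CMD);
	readl(port_mmio + PORT_CMD);
}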
@@ -642,25 +691,13 @@ static void ahci_eng_timeout(struct ata_port *ap)
642 691
643 spin_lock_irqsave(&host_set->lock, flags); 692 spin_lock_irqsave(&host_set->lock, flags);
644 693
694 ahci_restart_port(ap, readl(port_mmio + PORT_IRQ_STAT));
645 qc = ata_qc_from_tag(ap, ap->active_tag); 695 qc = ata_qc_from_tag(ap, ap->active_tag);
646 if (!qc) { 696 qc->err_mask |= AC_ERR_TIMEOUT;
647 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
648 ap->id);
649 } else {
650 ahci_restart_port(ap, readl(port_mmio + PORT_IRQ_STAT));
651
652 /* hack alert! We cannot use the supplied completion
653 * function from inside the ->eh_strategy_handler() thread.
654 * libata is the only user of ->eh_strategy_handler() in
655 * any kernel, so the default scsi_done() assumes it is
656 * not being called from the SCSI EH.
657 */
658 qc->scsidone = scsi_finish_command;
659 qc->err_mask |= AC_ERR_OTHER;
660 ata_qc_complete(qc);
661 }
662 697
663 spin_unlock_irqrestore(&host_set->lock, flags); 698 spin_unlock_irqrestore(&host_set->lock, flags);
699
700 ata_eh_qc_complete(qc);
664} 701}
665 702
666static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc) 703static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
@@ -678,7 +715,7 @@ static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
678 ci = readl(port_mmio + PORT_CMD_ISSUE); 715 ci = readl(port_mmio + PORT_CMD_ISSUE);
679 if (likely((ci & 0x1) == 0)) { 716 if (likely((ci & 0x1) == 0)) {
680 if (qc) { 717 if (qc) {
681 assert(qc->err_mask == 0); 718 WARN_ON(qc->err_mask);
682 ata_qc_complete(qc); 719 ata_qc_complete(qc);
683 qc = NULL; 720 qc = NULL;
684 } 721 }
@@ -697,7 +734,7 @@ static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
697 ahci_restart_port(ap, status); 734 ahci_restart_port(ap, status);
698 735
699 if (qc) { 736 if (qc) {
700 qc->err_mask |= AC_ERR_OTHER; 737 qc->err_mask |= err_mask;
701 ata_qc_complete(qc); 738 ata_qc_complete(qc);
702 } 739 }
703 } 740 }
@@ -770,7 +807,7 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
770 return IRQ_RETVAL(handled); 807 return IRQ_RETVAL(handled);
771} 808}
772 809
773static int ahci_qc_issue(struct ata_queued_cmd *qc) 810static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
774{ 811{
775 struct ata_port *ap = qc->ap; 812 struct ata_port *ap = qc->ap;
776 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr; 813 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
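Both ahci and ata_piix now route device probing through ata_drive_probe_reset() with per-stage callbacks (probeinit, softreset, hardreset, postreset), any of which may be NULL. For illustration only, a driver relying on the standard helpers wires up its probe_reset hook like this (the function name here is hypothetical):

static int example_probe_reset(struct ata_port *ap, unsigned int *classes)
{
	/* probeinit, softreset, hardreset, postreset, in that order */
	return ata_drive_probe_reset(ap, ata_std_probeinit,
				     ata_std_softreset, NULL,
				     ata_std_postreset, classes);
}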
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index fc3ca051ceed..9327b62f97de 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -101,36 +101,54 @@ enum {
101 ICH5_PCS = 0x92, /* port control and status */ 101 ICH5_PCS = 0x92, /* port control and status */
102 PIIX_SCC = 0x0A, /* sub-class code register */ 102 PIIX_SCC = 0x0A, /* sub-class code register */
103 103
104 PIIX_FLAG_AHCI = (1 << 28), /* AHCI possible */ 104 PIIX_FLAG_IGNORE_PCS = (1 << 25), /* ignore PCS present bits */
105 PIIX_FLAG_CHECKINTR = (1 << 29), /* make sure PCI INTx enabled */ 105 PIIX_FLAG_SCR = (1 << 26), /* SCR available */
106 PIIX_FLAG_COMBINED = (1 << 30), /* combined mode possible */ 106 PIIX_FLAG_AHCI = (1 << 27), /* AHCI possible */
107 PIIX_FLAG_CHECKINTR = (1 << 28), /* make sure PCI INTx enabled */
108 PIIX_FLAG_COMBINED = (1 << 29), /* combined mode possible */
109 /* ICH6/7 use different scheme for map value */
110 PIIX_FLAG_COMBINED_ICH6 = PIIX_FLAG_COMBINED | (1 << 30),
107 111
108 /* combined mode. if set, PATA is channel 0. 112 /* combined mode. if set, PATA is channel 0.
109 * if clear, PATA is channel 1. 113 * if clear, PATA is channel 1.
110 */ 114 */
111 PIIX_COMB_PATA_P0 = (1 << 1),
112 PIIX_COMB = (1 << 2), /* combined mode enabled? */
113
114 PIIX_PORT_ENABLED = (1 << 0), 115 PIIX_PORT_ENABLED = (1 << 0),
115 PIIX_PORT_PRESENT = (1 << 4), 116 PIIX_PORT_PRESENT = (1 << 4),
116 117
117 PIIX_80C_PRI = (1 << 5) | (1 << 4), 118 PIIX_80C_PRI = (1 << 5) | (1 << 4),
118 PIIX_80C_SEC = (1 << 7) | (1 << 6), 119 PIIX_80C_SEC = (1 << 7) | (1 << 6),
119 120
120 ich5_pata = 0, 121 /* controller IDs */
121 ich5_sata = 1, 122 piix4_pata = 0,
122 piix4_pata = 2, 123 ich5_pata = 1,
123 ich6_sata = 3, 124 ich5_sata = 2,
124 ich6_sata_ahci = 4, 125 esb_sata = 3,
126 ich6_sata = 4,
127 ich6_sata_ahci = 5,
128 ich6m_sata_ahci = 6,
129
130 /* constants for mapping table */
131 P0 = 0, /* port 0 */
132 P1 = 1, /* port 1 */
133 P2 = 2, /* port 2 */
134 P3 = 3, /* port 3 */
135 IDE = -1, /* IDE */
136 NA = -2, /* not available */
137 RV = -3, /* reserved */
125 138
126 PIIX_AHCI_DEVICE = 6, 139 PIIX_AHCI_DEVICE = 6,
127}; 140};
128 141
142struct piix_map_db {
143 const u32 mask;
144 const int map[][4];
145};
146
129static int piix_init_one (struct pci_dev *pdev, 147static int piix_init_one (struct pci_dev *pdev,
130 const struct pci_device_id *ent); 148 const struct pci_device_id *ent);
131 149
132static void piix_pata_phy_reset(struct ata_port *ap); 150static int piix_pata_probe_reset(struct ata_port *ap, unsigned int *classes);
133static void piix_sata_phy_reset(struct ata_port *ap); 151static int piix_sata_probe_reset(struct ata_port *ap, unsigned int *classes);
134static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev); 152static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev);
135static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev); 153static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev);
136 154
@@ -147,19 +165,32 @@ static const struct pci_device_id piix_pci_tbl[] = {
147 * list in drivers/pci/quirks.c. 165 * list in drivers/pci/quirks.c.
148 */ 166 */
149 167
168 /* 82801EB (ICH5) */
150 { 0x8086, 0x24d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, 169 { 0x8086, 0x24d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
170 /* 82801EB (ICH5) */
151 { 0x8086, 0x24df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, 171 { 0x8086, 0x24df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
152 { 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, 172 /* 6300ESB (ICH5 variant with broken PCS present bits) */
153 { 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, 173 { 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, esb_sata },
174 /* 6300ESB pretending RAID */
175 { 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, esb_sata },
176 /* 82801FB/FW (ICH6/ICH6W) */
154 { 0x8086, 0x2651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata }, 177 { 0x8086, 0x2651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
178 /* 82801FR/FRW (ICH6R/ICH6RW) */
155 { 0x8086, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, 179 { 0x8086, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
156 { 0x8086, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, 180 /* 82801FBM ICH6M (ICH6R with only port 0 and 2 implemented) */
181 { 0x8086, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci },
182 /* 82801GB/GR/GH (ICH7, identical to ICH6) */
157 { 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, 183 { 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
158 { 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, 184 /* 82801GBM/GHM (ICH7M, identical to ICH6M) */
185 { 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci },
186 /* Enterprise Southbridge 2 (where's the datasheet?) */
159 { 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, 187 { 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
188 /* SATA Controller 1 IDE (ICH8, no datasheet yet) */
160 { 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, 189 { 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
190 /* SATA Controller 2 IDE (ICH8, ditto) */
161 { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, 191 { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
162 { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, 192 /* Mobile SATA Controller IDE (ICH8M, ditto) */
193 { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci },
163 194
164 { } /* terminate list */ 195 { } /* terminate list */
165}; 196};
@@ -178,11 +209,11 @@ static struct scsi_host_template piix_sht = {
178 .name = DRV_NAME, 209 .name = DRV_NAME,
179 .ioctl = ata_scsi_ioctl, 210 .ioctl = ata_scsi_ioctl,
180 .queuecommand = ata_scsi_queuecmd, 211 .queuecommand = ata_scsi_queuecmd,
212 .eh_timed_out = ata_scsi_timed_out,
181 .eh_strategy_handler = ata_scsi_error, 213 .eh_strategy_handler = ata_scsi_error,
182 .can_queue = ATA_DEF_QUEUE, 214 .can_queue = ATA_DEF_QUEUE,
183 .this_id = ATA_SHT_THIS_ID, 215 .this_id = ATA_SHT_THIS_ID,
184 .sg_tablesize = LIBATA_MAX_PRD, 216 .sg_tablesize = LIBATA_MAX_PRD,
185 .max_sectors = ATA_MAX_SECTORS,
186 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 217 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
187 .emulated = ATA_SHT_EMULATED, 218 .emulated = ATA_SHT_EMULATED,
188 .use_clustering = ATA_SHT_USE_CLUSTERING, 219 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -205,7 +236,7 @@ static const struct ata_port_operations piix_pata_ops = {
205 .exec_command = ata_exec_command, 236 .exec_command = ata_exec_command,
206 .dev_select = ata_std_dev_select, 237 .dev_select = ata_std_dev_select,
207 238
208 .phy_reset = piix_pata_phy_reset, 239 .probe_reset = piix_pata_probe_reset,
209 240
210 .bmdma_setup = ata_bmdma_setup, 241 .bmdma_setup = ata_bmdma_setup,
211 .bmdma_start = ata_bmdma_start, 242 .bmdma_start = ata_bmdma_start,
@@ -233,7 +264,7 @@ static const struct ata_port_operations piix_sata_ops = {
233 .exec_command = ata_exec_command, 264 .exec_command = ata_exec_command,
234 .dev_select = ata_std_dev_select, 265 .dev_select = ata_std_dev_select,
235 266
236 .phy_reset = piix_sata_phy_reset, 267 .probe_reset = piix_sata_probe_reset,
237 268
238 .bmdma_setup = ata_bmdma_setup, 269 .bmdma_setup = ata_bmdma_setup,
239 .bmdma_start = ata_bmdma_start, 270 .bmdma_start = ata_bmdma_start,
@@ -252,12 +283,62 @@ static const struct ata_port_operations piix_sata_ops = {
252 .host_stop = ata_host_stop, 283 .host_stop = ata_host_stop,
253}; 284};
254 285
286static struct piix_map_db ich5_map_db = {
287 .mask = 0x7,
288 .map = {
289 /* PM PS SM SS MAP */
290 { P0, NA, P1, NA }, /* 000b */
291 { P1, NA, P0, NA }, /* 001b */
292 { RV, RV, RV, RV },
293 { RV, RV, RV, RV },
294 { P0, P1, IDE, IDE }, /* 100b */
295 { P1, P0, IDE, IDE }, /* 101b */
296 { IDE, IDE, P0, P1 }, /* 110b */
297 { IDE, IDE, P1, P0 }, /* 111b */
298 },
299};
300
301static struct piix_map_db ich6_map_db = {
302 .mask = 0x3,
303 .map = {
304 /* PM PS SM SS MAP */
305 { P0, P1, P2, P3 }, /* 00b */
306 { IDE, IDE, P1, P3 }, /* 01b */
307 { P0, P2, IDE, IDE }, /* 10b */
308 { RV, RV, RV, RV },
309 },
310};
311
312static struct piix_map_db ich6m_map_db = {
313 .mask = 0x3,
314 .map = {
315 /* PM PS SM SS MAP */
316 { P0, P1, P2, P3 }, /* 00b */
317 { RV, RV, RV, RV },
318 { P0, P2, IDE, IDE }, /* 10b */
319 { RV, RV, RV, RV },
320 },
321};
322
255static struct ata_port_info piix_port_info[] = { 323static struct ata_port_info piix_port_info[] = {
324 /* piix4_pata */
325 {
326 .sht = &piix_sht,
327 .host_flags = ATA_FLAG_SLAVE_POSS,
328 .pio_mask = 0x1f, /* pio0-4 */
329#if 0
330 .mwdma_mask = 0x06, /* mwdma1-2 */
331#else
332 .mwdma_mask = 0x00, /* mwdma broken */
333#endif
334 .udma_mask = ATA_UDMA_MASK_40C,
335 .port_ops = &piix_pata_ops,
336 },
337
256 /* ich5_pata */ 338 /* ich5_pata */
257 { 339 {
258 .sht = &piix_sht, 340 .sht = &piix_sht,
259 .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | 341 .host_flags = ATA_FLAG_SLAVE_POSS | PIIX_FLAG_CHECKINTR,
260 PIIX_FLAG_CHECKINTR,
261 .pio_mask = 0x1f, /* pio0-4 */ 342 .pio_mask = 0x1f, /* pio0-4 */
262#if 0 343#if 0
263 .mwdma_mask = 0x06, /* mwdma1-2 */ 344 .mwdma_mask = 0x06, /* mwdma1-2 */
@@ -271,50 +352,63 @@ static struct ata_port_info piix_port_info[] = {
271 /* ich5_sata */ 352 /* ich5_sata */
272 { 353 {
273 .sht = &piix_sht, 354 .sht = &piix_sht,
274 .host_flags = ATA_FLAG_SATA | ATA_FLAG_SRST | 355 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED |
275 PIIX_FLAG_COMBINED | PIIX_FLAG_CHECKINTR, 356 PIIX_FLAG_CHECKINTR,
276 .pio_mask = 0x1f, /* pio0-4 */ 357 .pio_mask = 0x1f, /* pio0-4 */
277 .mwdma_mask = 0x07, /* mwdma0-2 */ 358 .mwdma_mask = 0x07, /* mwdma0-2 */
278 .udma_mask = 0x7f, /* udma0-6 */ 359 .udma_mask = 0x7f, /* udma0-6 */
279 .port_ops = &piix_sata_ops, 360 .port_ops = &piix_sata_ops,
361 .private_data = &ich5_map_db,
280 }, 362 },
281 363
282 /* piix4_pata */ 364 /* i6300esb_sata */
283 { 365 {
284 .sht = &piix_sht, 366 .sht = &piix_sht,
285 .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, 367 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED |
368 PIIX_FLAG_CHECKINTR | PIIX_FLAG_IGNORE_PCS,
286 .pio_mask = 0x1f, /* pio0-4 */ 369 .pio_mask = 0x1f, /* pio0-4 */
287#if 0 370 .mwdma_mask = 0x07, /* mwdma0-2 */
288 .mwdma_mask = 0x06, /* mwdma1-2 */ 371 .udma_mask = 0x7f, /* udma0-6 */
289#else 372 .port_ops = &piix_sata_ops,
290 .mwdma_mask = 0x00, /* mwdma broken */ 373 .private_data = &ich5_map_db,
291#endif
292 .udma_mask = ATA_UDMA_MASK_40C,
293 .port_ops = &piix_pata_ops,
294 }, 374 },
295 375
296 /* ich6_sata */ 376 /* ich6_sata */
297 { 377 {
298 .sht = &piix_sht, 378 .sht = &piix_sht,
299 .host_flags = ATA_FLAG_SATA | ATA_FLAG_SRST | 379 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 |
300 PIIX_FLAG_COMBINED | PIIX_FLAG_CHECKINTR | 380 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR,
301 ATA_FLAG_SLAVE_POSS,
302 .pio_mask = 0x1f, /* pio0-4 */ 381 .pio_mask = 0x1f, /* pio0-4 */
303 .mwdma_mask = 0x07, /* mwdma0-2 */ 382 .mwdma_mask = 0x07, /* mwdma0-2 */
304 .udma_mask = 0x7f, /* udma0-6 */ 383 .udma_mask = 0x7f, /* udma0-6 */
305 .port_ops = &piix_sata_ops, 384 .port_ops = &piix_sata_ops,
385 .private_data = &ich6_map_db,
306 }, 386 },
307 387
308 /* ich6_sata_ahci */ 388 /* ich6_sata_ahci */
309 { 389 {
310 .sht = &piix_sht, 390 .sht = &piix_sht,
311 .host_flags = ATA_FLAG_SATA | ATA_FLAG_SRST | 391 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 |
312 PIIX_FLAG_COMBINED | PIIX_FLAG_CHECKINTR | 392 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
313 ATA_FLAG_SLAVE_POSS | PIIX_FLAG_AHCI, 393 PIIX_FLAG_AHCI,
394 .pio_mask = 0x1f, /* pio0-4 */
395 .mwdma_mask = 0x07, /* mwdma0-2 */
396 .udma_mask = 0x7f, /* udma0-6 */
397 .port_ops = &piix_sata_ops,
398 .private_data = &ich6_map_db,
399 },
400
401 /* ich6m_sata_ahci */
402 {
403 .sht = &piix_sht,
404 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 |
405 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
406 PIIX_FLAG_AHCI,
314 .pio_mask = 0x1f, /* pio0-4 */ 407 .pio_mask = 0x1f, /* pio0-4 */
315 .mwdma_mask = 0x07, /* mwdma0-2 */ 408 .mwdma_mask = 0x07, /* mwdma0-2 */
316 .udma_mask = 0x7f, /* udma0-6 */ 409 .udma_mask = 0x7f, /* udma0-6 */
317 .port_ops = &piix_sata_ops, 410 .port_ops = &piix_sata_ops,
411 .private_data = &ich6m_map_db,
318 }, 412 },
319}; 413};
320 414
@@ -363,102 +457,123 @@ cbl40:
363} 457}
364 458
365/** 459/**
366 * piix_pata_phy_reset - Probe specified port on PATA host controller 460 * piix_pata_probeinit - probeinit for PATA host controller
367 * @ap: Port to probe 461 * @ap: Target port
368 * 462 *
369 * Probe PATA phy. 463 * Probeinit including cable detection.
370 * 464 *
371 * LOCKING: 465 * LOCKING:
372 * None (inherited from caller). 466 * None (inherited from caller).
373 */ 467 */
468static void piix_pata_probeinit(struct ata_port *ap)
469{
470 piix_pata_cbl_detect(ap);
471 ata_std_probeinit(ap);
472}
374 473
375static void piix_pata_phy_reset(struct ata_port *ap) 474/**
475 * piix_pata_probe_reset - Perform reset on PATA port and classify
476 * @ap: Port to reset
477 * @classes: Resulting classes of attached devices
478 *
479 * Reset PATA phy and classify attached devices.
480 *
481 * LOCKING:
482 * None (inherited from caller).
483 */
484static int piix_pata_probe_reset(struct ata_port *ap, unsigned int *classes)
376{ 485{
377 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); 486 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
378 487
379 if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->hard_port_no])) { 488 if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->hard_port_no])) {
380 ata_port_disable(ap);
381 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id); 489 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
382 return; 490 return 0;
383 } 491 }
384 492
385 piix_pata_cbl_detect(ap); 493 return ata_drive_probe_reset(ap, piix_pata_probeinit,
386 494 ata_std_softreset, NULL,
387 ata_port_probe(ap); 495 ata_std_postreset, classes);
388
389 ata_bus_reset(ap);
390} 496}
391 497
392/** 498/**
393 * piix_sata_probe - Probe PCI device for present SATA devices 499 * piix_sata_probe - Probe PCI device for present SATA devices
394 * @ap: Port associated with the PCI device we wish to probe 500 * @ap: Port associated with the PCI device we wish to probe
395 * 501 *
396 * Reads SATA PCI device's PCI config register Port Configuration 502 * Reads and configures SATA PCI device's PCI config register
397 * and Status (PCS) to determine port and device availability. 503 * Port Configuration and Status (PCS) to determine port and
504 * device availability.
398 * 505 *
399 * LOCKING: 506 * LOCKING:
400 * None (inherited from caller). 507 * None (inherited from caller).
401 * 508 *
402 * RETURNS: 509 * RETURNS:
403 * Non-zero if port is enabled, it may or may not have a device 510 * Mask of available devices on the port.
404 * attached in that case (PRESENT bit would only be set if BIOS probe
405 * was done). Zero is returned if port is disabled.
406 */ 511 */
407static int piix_sata_probe (struct ata_port *ap) 512static unsigned int piix_sata_probe (struct ata_port *ap)
408{ 513{
409 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); 514 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
410 int combined = (ap->flags & ATA_FLAG_SLAVE_POSS); 515 const unsigned int *map = ap->host_set->private_data;
411 int orig_mask, mask, i; 516 int base = 2 * ap->hard_port_no;
517 unsigned int present_mask = 0;
518 int port, i;
412 u8 pcs; 519 u8 pcs;
413 520
414 mask = (PIIX_PORT_PRESENT << ap->hard_port_no) |
415 (PIIX_PORT_ENABLED << ap->hard_port_no);
416
417 pci_read_config_byte(pdev, ICH5_PCS, &pcs); 521 pci_read_config_byte(pdev, ICH5_PCS, &pcs);
418 orig_mask = (int) pcs & 0xff; 522 DPRINTK("ata%u: ENTER, pcs=0x%x base=%d\n", ap->id, pcs, base);
419
420 /* TODO: this is vaguely wrong for ICH6 combined mode,
421 * where only two of the four SATA ports are mapped
422 * onto a single ATA channel. It is also vaguely inaccurate
423 * for ICH5, which has only two ports. However, this is ok,
424 * as further device presence detection code will handle
425 * any false positives produced here.
426 */
427 523
428 for (i = 0; i < 4; i++) { 524 /* enable all ports on this ap and wait for them to settle */
429 mask = (PIIX_PORT_ENABLED << i); 525 for (i = 0; i < 2; i++) {
526 port = map[base + i];
527 if (port >= 0)
528 pcs |= 1 << port;
529 }
530
531 pci_write_config_byte(pdev, ICH5_PCS, pcs);
532 msleep(100);
430 533
431 if ((orig_mask & mask) == mask) 534 /* let's see which devices are present */
432 if (combined || (i == ap->hard_port_no)) 535 pci_read_config_byte(pdev, ICH5_PCS, &pcs);
433 return 1; 536
537 for (i = 0; i < 2; i++) {
538 port = map[base + i];
539 if (port < 0)
540 continue;
541 if (ap->flags & PIIX_FLAG_IGNORE_PCS || pcs & 1 << (4 + port))
542 present_mask |= 1 << i;
543 else
544 pcs &= ~(1 << port);
434 } 545 }
435 546
436 return 0; 547 /* disable offline ports on non-AHCI controllers */
548 if (!(ap->flags & PIIX_FLAG_AHCI))
549 pci_write_config_byte(pdev, ICH5_PCS, pcs);
550
551 DPRINTK("ata%u: LEAVE, pcs=0x%x present_mask=0x%x\n",
552 ap->id, pcs, present_mask);
553
554 return present_mask;
437} 555}
438 556
439/** 557/**
440 * piix_sata_phy_reset - Probe specified port on SATA host controller 558 * piix_sata_probe_reset - Perform reset on SATA port and classify
441 * @ap: Port to probe 559 * @ap: Port to reset
560 * @classes: Resulting classes of attached devices
442 * 561 *
443 * Probe SATA phy. 562 * Reset SATA phy and classify attached devices.
444 * 563 *
445 * LOCKING: 564 * LOCKING:
446 * None (inherited from caller). 565 * None (inherited from caller).
447 */ 566 */
448 567static int piix_sata_probe_reset(struct ata_port *ap, unsigned int *classes)
449static void piix_sata_phy_reset(struct ata_port *ap)
450{ 568{
451 if (!piix_sata_probe(ap)) { 569 if (!piix_sata_probe(ap)) {
452 ata_port_disable(ap);
453 printk(KERN_INFO "ata%u: SATA port has no device.\n", ap->id); 570 printk(KERN_INFO "ata%u: SATA port has no device.\n", ap->id);
454 return; 571 return 0;
455 } 572 }
456 573
457 ap->cbl = ATA_CBL_SATA; 574 return ata_drive_probe_reset(ap, ata_std_probeinit,
458 575 ata_std_softreset, NULL,
459 ata_port_probe(ap); 576 ata_std_postreset, classes);
460
461 ata_bus_reset(ap);
462} 577}
463 578
464/** 579/**
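The reworked piix_sata_probe() above first sets the enable bits for the two ports mapped onto this channel, waits 100 ms for the PHYs to settle, then re-reads PCS to build present_mask. As the PIIX_PORT_ENABLED/PIIX_PORT_PRESENT constants suggest, bit p of PCS enables SATA port p and bit (4 + p) reports presence, so the presence test reduces to this illustrative helper (not part of the patch):

static int piix_example_port_present(u8 pcs, int port)
{
	/* PIIX_PORT_PRESENT is (1 << 4): the per-port present bit sits
	 * four positions above the per-port enable bit */
	return (pcs & (1 << (4 + port))) != 0;
}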
@@ -627,6 +742,7 @@ static int piix_disable_ahci(struct pci_dev *pdev)
627 742
628/** 743/**
629 * piix_check_450nx_errata - Check for problem 450NX setup 744 * piix_check_450nx_errata - Check for problem 450NX setup
745 * @ata_dev: the PCI device to check
630 * 746 *
631 * Check for the presence of 450NX errata #19 and errata #25. If 747
632 * they are found, return an error code so we can turn off DMA 748
@@ -659,6 +775,54 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
659 return no_piix_dma; 775 return no_piix_dma;
660} 776}
661 777
778static void __devinit piix_init_sata_map(struct pci_dev *pdev,
779 struct ata_port_info *pinfo)
780{
781 struct piix_map_db *map_db = pinfo[0].private_data;
782 const unsigned int *map;
783 int i, invalid_map = 0;
784 u8 map_value;
785
786 pci_read_config_byte(pdev, ICH5_PMR, &map_value);
787
788 map = map_db->map[map_value & map_db->mask];
789
790 dev_printk(KERN_INFO, &pdev->dev, "MAP [");
791 for (i = 0; i < 4; i++) {
792 switch (map[i]) {
793 case RV:
794 invalid_map = 1;
795 printk(" XX");
796 break;
797
798 case NA:
799 printk(" --");
800 break;
801
802 case IDE:
803 WARN_ON((i & 1) || map[i + 1] != IDE);
804 pinfo[i / 2] = piix_port_info[ich5_pata];
805 i++;
806 printk(" IDE IDE");
807 break;
808
809 default:
810 printk(" P%d", map[i]);
811 if (i & 1)
812 pinfo[i / 2].host_flags |= ATA_FLAG_SLAVE_POSS;
813 break;
814 }
815 }
816 printk(" ]\n");
817
818 if (invalid_map)
819 dev_printk(KERN_ERR, &pdev->dev,
820 "invalid MAP value %u\n", map_value);
821
822 pinfo[0].private_data = (void *)map;
823 pinfo[1].private_data = (void *)map;
824}
825
662/** 826/**
663 * piix_init_one - Register PIIX ATA PCI device with kernel services 827 * piix_init_one - Register PIIX ATA PCI device with kernel services
664 * @pdev: PCI device to register 828 * @pdev: PCI device to register
@@ -677,9 +841,9 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
677static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) 841static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
678{ 842{
679 static int printed_version; 843 static int printed_version;
680 struct ata_port_info *port_info[2]; 844 struct ata_port_info port_info[2];
681 unsigned int combined = 0; 845 struct ata_port_info *ppinfo[2] = { &port_info[0], &port_info[1] };
682 unsigned int pata_chan = 0, sata_chan = 0; 846 unsigned long host_flags;
683 847
684 if (!printed_version++) 848 if (!printed_version++)
685 dev_printk(KERN_DEBUG, &pdev->dev, 849 dev_printk(KERN_DEBUG, &pdev->dev,
@@ -689,10 +853,12 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
689 if (!in_module_init) 853 if (!in_module_init)
690 return -ENODEV; 854 return -ENODEV;
691 855
692 port_info[0] = &piix_port_info[ent->driver_data]; 856 port_info[0] = piix_port_info[ent->driver_data];
693 port_info[1] = &piix_port_info[ent->driver_data]; 857 port_info[1] = piix_port_info[ent->driver_data];
858
859 host_flags = port_info[0].host_flags;
694 860
695 if (port_info[0]->host_flags & PIIX_FLAG_AHCI) { 861 if (host_flags & PIIX_FLAG_AHCI) {
696 u8 tmp; 862 u8 tmp;
697 pci_read_config_byte(pdev, PIIX_SCC, &tmp); 863 pci_read_config_byte(pdev, PIIX_SCC, &tmp);
698 if (tmp == PIIX_AHCI_DEVICE) { 864 if (tmp == PIIX_AHCI_DEVICE) {
@@ -702,18 +868,9 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
702 } 868 }
703 } 869 }
704 870
705 if (port_info[0]->host_flags & PIIX_FLAG_COMBINED) { 871 /* Initialize SATA map */
706 u8 tmp; 872 if (host_flags & ATA_FLAG_SATA)
707 pci_read_config_byte(pdev, ICH5_PMR, &tmp); 873 piix_init_sata_map(pdev, port_info);
708
709 if (tmp & PIIX_COMB) {
710 combined = 1;
711 if (tmp & PIIX_COMB_PATA_P0)
712 sata_chan = 1;
713 else
714 pata_chan = 1;
715 }
716 }
717 874
718 /* On ICH5, some BIOSen disable the interrupt using the 875 /* On ICH5, some BIOSen disable the interrupt using the
719 * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3. 876 * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3.
@@ -721,28 +878,19 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
721 * MSI is disabled (and it is disabled, as we don't use 878 * MSI is disabled (and it is disabled, as we don't use
722 * message-signalled interrupts currently). 879 * message-signalled interrupts currently).
723 */ 880 */
724 if (port_info[0]->host_flags & PIIX_FLAG_CHECKINTR) 881 if (host_flags & PIIX_FLAG_CHECKINTR)
725 pci_intx(pdev, 1); 882 pci_intx(pdev, 1);
726 883
727 if (combined) {
728 port_info[sata_chan] = &piix_port_info[ent->driver_data];
729 port_info[sata_chan]->host_flags |= ATA_FLAG_SLAVE_POSS;
730 port_info[pata_chan] = &piix_port_info[ich5_pata];
731
732 dev_printk(KERN_WARNING, &pdev->dev,
733 "combined mode detected (p=%u, s=%u)\n",
734 pata_chan, sata_chan);
735 }
736 if (piix_check_450nx_errata(pdev)) { 884 if (piix_check_450nx_errata(pdev)) {
737 /* This writes into the master table but it does not 885 /* This writes into the master table but it does not
738 really matter for this errata as we will apply it to 886 really matter for this errata as we will apply it to
739 all the PIIX devices on the board */ 887 all the PIIX devices on the board */
740 port_info[0]->mwdma_mask = 0; 888 port_info[0].mwdma_mask = 0;
741 port_info[0]->udma_mask = 0; 889 port_info[0].udma_mask = 0;
742 port_info[1]->mwdma_mask = 0; 890 port_info[1].mwdma_mask = 0;
743 port_info[1]->udma_mask = 0; 891 port_info[1].udma_mask = 0;
744 } 892 }
745 return ata_pci_init_one(pdev, port_info, 2); 893 return ata_pci_init_one(pdev, ppinfo, 2);
746} 894}
747 895
748static int __init piix_init(void) 896static int __init piix_init(void)
diff --git a/drivers/scsi/libata-bmdma.c b/drivers/scsi/libata-bmdma.c
new file mode 100644
index 000000000000..a93336adcd23
--- /dev/null
+++ b/drivers/scsi/libata-bmdma.c
@@ -0,0 +1,703 @@
1/*
2 * libata-bmdma.c - helper library for PCI IDE BMDMA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2006 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2006 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35#include <linux/config.h>
36#include <linux/kernel.h>
37#include <linux/pci.h>
38#include <linux/libata.h>
39
40#include "libata.h"
41
42/**
43 * ata_tf_load_pio - send taskfile registers to host controller
44 * @ap: Port to which output is sent
45 * @tf: ATA taskfile register set
46 *
47 * Outputs ATA taskfile to standard ATA host controller.
48 *
49 * LOCKING:
50 * Inherited from caller.
51 */
52
53static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
54{
55 struct ata_ioports *ioaddr = &ap->ioaddr;
56 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
57
58 if (tf->ctl != ap->last_ctl) {
59 outb(tf->ctl, ioaddr->ctl_addr);
60 ap->last_ctl = tf->ctl;
61 ata_wait_idle(ap);
62 }
63
64 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
65 outb(tf->hob_feature, ioaddr->feature_addr);
66 outb(tf->hob_nsect, ioaddr->nsect_addr);
67 outb(tf->hob_lbal, ioaddr->lbal_addr);
68 outb(tf->hob_lbam, ioaddr->lbam_addr);
69 outb(tf->hob_lbah, ioaddr->lbah_addr);
70 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
71 tf->hob_feature,
72 tf->hob_nsect,
73 tf->hob_lbal,
74 tf->hob_lbam,
75 tf->hob_lbah);
76 }
77
78 if (is_addr) {
79 outb(tf->feature, ioaddr->feature_addr);
80 outb(tf->nsect, ioaddr->nsect_addr);
81 outb(tf->lbal, ioaddr->lbal_addr);
82 outb(tf->lbam, ioaddr->lbam_addr);
83 outb(tf->lbah, ioaddr->lbah_addr);
84 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
85 tf->feature,
86 tf->nsect,
87 tf->lbal,
88 tf->lbam,
89 tf->lbah);
90 }
91
92 if (tf->flags & ATA_TFLAG_DEVICE) {
93 outb(tf->device, ioaddr->device_addr);
94 VPRINTK("device 0x%X\n", tf->device);
95 }
96
97 ata_wait_idle(ap);
98}
99
100/**
101 * ata_tf_load_mmio - send taskfile registers to host controller
102 * @ap: Port to which output is sent
103 * @tf: ATA taskfile register set
104 *
105 * Outputs ATA taskfile to standard ATA host controller using MMIO.
106 *
107 * LOCKING:
108 * Inherited from caller.
109 */
110
111static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
112{
113 struct ata_ioports *ioaddr = &ap->ioaddr;
114 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
115
116 if (tf->ctl != ap->last_ctl) {
117 writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
118 ap->last_ctl = tf->ctl;
119 ata_wait_idle(ap);
120 }
121
122 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
123 writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
124 writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
125 writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
126 writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
127 writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
128 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
129 tf->hob_feature,
130 tf->hob_nsect,
131 tf->hob_lbal,
132 tf->hob_lbam,
133 tf->hob_lbah);
134 }
135
136 if (is_addr) {
137 writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
138 writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
139 writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
140 writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
141 writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
142 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
143 tf->feature,
144 tf->nsect,
145 tf->lbal,
146 tf->lbam,
147 tf->lbah);
148 }
149
150 if (tf->flags & ATA_TFLAG_DEVICE) {
151 writeb(tf->device, (void __iomem *) ioaddr->device_addr);
152 VPRINTK("device 0x%X\n", tf->device);
153 }
154
155 ata_wait_idle(ap);
156}
157
158
159/**
160 * ata_tf_load - send taskfile registers to host controller
161 * @ap: Port to which output is sent
162 * @tf: ATA taskfile register set
163 *
164 * Outputs ATA taskfile to standard ATA host controller using MMIO
165 * or PIO as indicated by the ATA_FLAG_MMIO flag.
166 * Writes the control, feature, nsect, lbal, lbam, and lbah registers.
167 * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
168 * hob_lbal, hob_lbam, and hob_lbah.
169 *
170 * This function waits for idle (!BUSY and !DRQ) after writing
171 * registers. If the control register has a new value, this
172 * function also waits for idle after writing control and before
173 * writing the remaining registers.
174 *
175 * May be used as the tf_load() entry in ata_port_operations.
176 *
177 * LOCKING:
178 * Inherited from caller.
179 */
180void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
181{
182 if (ap->flags & ATA_FLAG_MMIO)
183 ata_tf_load_mmio(ap, tf);
184 else
185 ata_tf_load_pio(ap, tf);
186}
187
188/**
189 * ata_exec_command_pio - issue ATA command to host controller
190 * @ap: port to which command is being issued
191 * @tf: ATA taskfile register set
192 *
193 * Issues PIO write to ATA command register, with proper
194 * synchronization with interrupt handler / other threads.
195 *
196 * LOCKING:
197 * spin_lock_irqsave(host_set lock)
198 */
199
200static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
201{
202 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
203
204 outb(tf->command, ap->ioaddr.command_addr);
205 ata_pause(ap);
206}
207
208
209/**
210 * ata_exec_command_mmio - issue ATA command to host controller
211 * @ap: port to which command is being issued
212 * @tf: ATA taskfile register set
213 *
214 * Issues MMIO write to ATA command register, with proper
215 * synchronization with interrupt handler / other threads.
216 *
217 * LOCKING:
218 * spin_lock_irqsave(host_set lock)
219 */
220
221static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
222{
223 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
224
225 writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
226 ata_pause(ap);
227}
228
229
230/**
231 * ata_exec_command - issue ATA command to host controller
232 * @ap: port to which command is being issued
233 * @tf: ATA taskfile register set
234 *
235 * Issues PIO/MMIO write to ATA command register, with proper
236 * synchronization with interrupt handler / other threads.
237 *
238 * LOCKING:
239 * spin_lock_irqsave(host_set lock)
240 */
241void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
242{
243 if (ap->flags & ATA_FLAG_MMIO)
244 ata_exec_command_mmio(ap, tf);
245 else
246 ata_exec_command_pio(ap, tf);
247}
248
249/**
250 * ata_tf_read_pio - input device's ATA taskfile shadow registers
251 * @ap: Port from which input is read
252 * @tf: ATA taskfile register set for storing input
253 *
254 * Reads ATA taskfile registers for currently-selected device
255 * into @tf.
256 *
257 * LOCKING:
258 * Inherited from caller.
259 */
260
261static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
262{
263 struct ata_ioports *ioaddr = &ap->ioaddr;
264
265 tf->command = ata_check_status(ap);
266 tf->feature = inb(ioaddr->error_addr);
267 tf->nsect = inb(ioaddr->nsect_addr);
268 tf->lbal = inb(ioaddr->lbal_addr);
269 tf->lbam = inb(ioaddr->lbam_addr);
270 tf->lbah = inb(ioaddr->lbah_addr);
271 tf->device = inb(ioaddr->device_addr);
272
273 if (tf->flags & ATA_TFLAG_LBA48) {
274 outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
275 tf->hob_feature = inb(ioaddr->error_addr);
276 tf->hob_nsect = inb(ioaddr->nsect_addr);
277 tf->hob_lbal = inb(ioaddr->lbal_addr);
278 tf->hob_lbam = inb(ioaddr->lbam_addr);
279 tf->hob_lbah = inb(ioaddr->lbah_addr);
280 }
281}
282
283/**
284 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
285 * @ap: Port from which input is read
286 * @tf: ATA taskfile register set for storing input
287 *
288 * Reads ATA taskfile registers for currently-selected device
289 * into @tf via MMIO.
290 *
291 * LOCKING:
292 * Inherited from caller.
293 */
294
295static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
296{
297 struct ata_ioports *ioaddr = &ap->ioaddr;
298
299 tf->command = ata_check_status(ap);
300 tf->feature = readb((void __iomem *)ioaddr->error_addr);
301 tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
302 tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
303 tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
304 tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
305 tf->device = readb((void __iomem *)ioaddr->device_addr);
306
307 if (tf->flags & ATA_TFLAG_LBA48) {
308 writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
309 tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
310 tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
311 tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
312 tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
313 tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
314 }
315}
316
317
318/**
319 * ata_tf_read - input device's ATA taskfile shadow registers
320 * @ap: Port from which input is read
321 * @tf: ATA taskfile register set for storing input
322 *
323 * Reads ATA taskfile registers for currently-selected device
324 * into @tf.
325 *
326 * Reads nsect, lbal, lbam, lbah, and device. If ATA_TFLAG_LBA48
327 * is set, also reads the hob registers.
328 *
329 * May be used as the tf_read() entry in ata_port_operations.
330 *
331 * LOCKING:
332 * Inherited from caller.
333 */
334void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
335{
336 if (ap->flags & ATA_FLAG_MMIO)
337 ata_tf_read_mmio(ap, tf);
338 else
339 ata_tf_read_pio(ap, tf);
340}
341
342/**
343 * ata_check_status_pio - Read device status reg & clear interrupt
344 * @ap: port where the device is
345 *
346 * Reads ATA taskfile status register for currently-selected device
347 * and returns its value. This also clears pending interrupts
348 * from this device.
349 *
350 * LOCKING:
351 * Inherited from caller.
352 */
353static u8 ata_check_status_pio(struct ata_port *ap)
354{
355 return inb(ap->ioaddr.status_addr);
356}
357
358/**
359 * ata_check_status_mmio - Read device status reg & clear interrupt
360 * @ap: port where the device is
361 *
362 * Reads ATA taskfile status register for currently-selected device
363 * via MMIO and returns its value. This also clears pending interrupts
364 * from this device.
365 *
366 * LOCKING:
367 * Inherited from caller.
368 */
369static u8 ata_check_status_mmio(struct ata_port *ap)
370{
371 return readb((void __iomem *) ap->ioaddr.status_addr);
372}
373
374
375/**
376 * ata_check_status - Read device status reg & clear interrupt
377 * @ap: port where the device is
378 *
379 * Reads ATA taskfile status register for currently-selected device
380 * and returns its value. This also clears pending interrupts
381 * from this device.
382 *
383 * May be used as the check_status() entry in ata_port_operations.
384 *
385 * LOCKING:
386 * Inherited from caller.
387 */
388u8 ata_check_status(struct ata_port *ap)
389{
390 if (ap->flags & ATA_FLAG_MMIO)
391 return ata_check_status_mmio(ap);
392 return ata_check_status_pio(ap);
393}
394
395
396/**
397 * ata_altstatus - Read device alternate status reg
398 * @ap: port where the device is
399 *
400 * Reads ATA taskfile alternate status register for
401 * currently-selected device and returns its value.
402 *
403 * Note: may NOT be used as the check_altstatus() entry in
404 * ata_port_operations.
405 *
406 * LOCKING:
407 * Inherited from caller.
408 */
409u8 ata_altstatus(struct ata_port *ap)
410{
411 if (ap->ops->check_altstatus)
412 return ap->ops->check_altstatus(ap);
413
414 if (ap->flags & ATA_FLAG_MMIO)
415 return readb((void __iomem *)ap->ioaddr.altstatus_addr);
416 return inb(ap->ioaddr.altstatus_addr);
417}
418
419#ifdef CONFIG_PCI
420static struct ata_probe_ent *
421ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
422{
423 struct ata_probe_ent *probe_ent;
424
425 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
426 if (!probe_ent) {
427 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
428 kobject_name(&(dev->kobj)));
429 return NULL;
430 }
431
432 INIT_LIST_HEAD(&probe_ent->node);
433 probe_ent->dev = dev;
434
435 probe_ent->sht = port->sht;
436 probe_ent->host_flags = port->host_flags;
437 probe_ent->pio_mask = port->pio_mask;
438 probe_ent->mwdma_mask = port->mwdma_mask;
439 probe_ent->udma_mask = port->udma_mask;
440 probe_ent->port_ops = port->port_ops;
441
442 return probe_ent;
443}
444
445
446/**
447 * ata_pci_init_native_mode - Initialize native-mode driver
448 * @pdev: pci device to be initialized
449 * @port: array[2] of pointers to port info structures.
450 * @ports: bitmap of ports present
451 *
452 * Utility function which allocates and initializes an
453 * ata_probe_ent structure for a standard dual-port
454 * PIO-based IDE controller. The returned ata_probe_ent
455 * structure can be passed to ata_device_add(). The returned
456 * ata_probe_ent structure should then be freed with kfree().
457 *
458 * The caller need only pass the address of the primary port; the
459 * secondary will be deduced automatically. If the device has
460 * non-standard secondary port mappings, this function can be called twice,
461 * once for each interface.
462 */
463
464struct ata_probe_ent *
465ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
466{
467 struct ata_probe_ent *probe_ent =
468 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
469 int p = 0;
470
471 if (!probe_ent)
472 return NULL;
473
474 probe_ent->irq = pdev->irq;
475 probe_ent->irq_flags = SA_SHIRQ;
476 probe_ent->private_data = port[0]->private_data;
477
478 if (ports & ATA_PORT_PRIMARY) {
479 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
480 probe_ent->port[p].altstatus_addr =
481 probe_ent->port[p].ctl_addr =
482 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
483 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4);
484 ata_std_ports(&probe_ent->port[p]);
485 p++;
486 }
487
488 if (ports & ATA_PORT_SECONDARY) {
489 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
490 probe_ent->port[p].altstatus_addr =
491 probe_ent->port[p].ctl_addr =
492 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
493 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4) + 8;
494 ata_std_ports(&probe_ent->port[p]);
495 p++;
496 }
497
498 probe_ent->n_ports = p;
499 return probe_ent;
500}
501
502
503static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
504 struct ata_port_info *port, int port_num)
505{
506 struct ata_probe_ent *probe_ent;
507
508 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port);
509 if (!probe_ent)
510 return NULL;
511
512 probe_ent->legacy_mode = 1;
513 probe_ent->n_ports = 1;
514 probe_ent->hard_port_no = port_num;
515 probe_ent->private_data = port->private_data;
516
517 switch(port_num)
518 {
519 case 0:
520 probe_ent->irq = 14;
521 probe_ent->port[0].cmd_addr = 0x1f0;
522 probe_ent->port[0].altstatus_addr =
523 probe_ent->port[0].ctl_addr = 0x3f6;
524 break;
525 case 1:
526 probe_ent->irq = 15;
527 probe_ent->port[0].cmd_addr = 0x170;
528 probe_ent->port[0].altstatus_addr =
529 probe_ent->port[0].ctl_addr = 0x376;
530 break;
531 }
532
533 probe_ent->port[0].bmdma_addr =
534 pci_resource_start(pdev, 4) + 8 * port_num;
535 ata_std_ports(&probe_ent->port[0]);
536
537 return probe_ent;
538}
539
540
541/**
542 * ata_pci_init_one - Initialize/register PCI IDE host controller
543 * @pdev: Controller to be initialized
544 * @port_info: Information from low-level host driver
545 * @n_ports: Number of ports attached to host controller
546 *
547 * This is a helper function which can be called from a driver's
548 * xxx_init_one() probe function if the hardware uses traditional
549 * IDE taskfile registers.
550 *
551 * This function calls pci_enable_device(), reserves its register
552 * regions, sets the dma mask, enables bus master mode, and calls
553 * ata_device_add()
554 *
555 * LOCKING:
556 * Inherited from PCI layer (may sleep).
557 *
558 * RETURNS:
559 * Zero on success, negative errno-based value on error.
560 */
561
562int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
563 unsigned int n_ports)
564{
565 struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
566 struct ata_port_info *port[2];
567 u8 tmp8, mask;
568 unsigned int legacy_mode = 0;
569 int disable_dev_on_err = 1;
570 int rc;
571
572 DPRINTK("ENTER\n");
573
574 port[0] = port_info[0];
575 if (n_ports > 1)
576 port[1] = port_info[1];
577 else
578 port[1] = port[0];
579
580 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
581 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
582 /* TODO: What if one channel is in native mode ... */
583 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
584 mask = (1 << 2) | (1 << 0);
585 if ((tmp8 & mask) != mask)
586 legacy_mode = (1 << 3);
587 }
588
589 /* FIXME... */
590 if ((!legacy_mode) && (n_ports > 2)) {
591 printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
592 n_ports = 2;
593 /* For now */
594 }
595
596 /* FIXME: Really for ATA it isn't safe because the device may be
597 multi-purpose and we want to leave it alone if it was already
598 enabled. Secondly for shared use as Arjan says we want refcounting
599
600 Checking dev->is_enabled is insufficient as this is not set at
601 boot for the primary video which is BIOS enabled
602 */
603
604 rc = pci_enable_device(pdev);
605 if (rc)
606 return rc;
607
608 rc = pci_request_regions(pdev, DRV_NAME);
609 if (rc) {
610 disable_dev_on_err = 0;
611 goto err_out;
612 }
613
614 /* FIXME: Should use platform specific mappers for legacy port ranges */
615 if (legacy_mode) {
616 if (!request_region(0x1f0, 8, "libata")) {
617 struct resource *conflict, res;
618 res.start = 0x1f0;
619 res.end = 0x1f0 + 8 - 1;
620 conflict = ____request_resource(&ioport_resource, &res);
621 if (!strcmp(conflict->name, "libata"))
622 legacy_mode |= (1 << 0);
623 else {
624 disable_dev_on_err = 0;
625 printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
626 }
627 } else
628 legacy_mode |= (1 << 0);
629
630 if (!request_region(0x170, 8, "libata")) {
631 struct resource *conflict, res;
632 res.start = 0x170;
633 res.end = 0x170 + 8 - 1;
634 conflict = ____request_resource(&ioport_resource, &res);
635 if (!strcmp(conflict->name, "libata"))
636 legacy_mode |= (1 << 1);
637 else {
638 disable_dev_on_err = 0;
639 printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
640 }
641 } else
642 legacy_mode |= (1 << 1);
643 }
644
645 /* we have legacy mode, but all ports are unavailable */
646 if (legacy_mode == (1 << 3)) {
647 rc = -EBUSY;
648 goto err_out_regions;
649 }
650
651 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
652 if (rc)
653 goto err_out_regions;
654 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
655 if (rc)
656 goto err_out_regions;
657
658 if (legacy_mode) {
659 if (legacy_mode & (1 << 0))
660 probe_ent = ata_pci_init_legacy_port(pdev, port[0], 0);
661 if (legacy_mode & (1 << 1))
662 probe_ent2 = ata_pci_init_legacy_port(pdev, port[1], 1);
663 } else {
664 if (n_ports == 2)
665 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
666 else
667 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
668 }
669 if (!probe_ent && !probe_ent2) {
670 rc = -ENOMEM;
671 goto err_out_regions;
672 }
673
674 pci_set_master(pdev);
675
676 /* FIXME: check ata_device_add return */
677 if (legacy_mode) {
678 if (legacy_mode & (1 << 0))
679 ata_device_add(probe_ent);
680 if (legacy_mode & (1 << 1))
681 ata_device_add(probe_ent2);
682 } else
683 ata_device_add(probe_ent);
684
685 kfree(probe_ent);
686 kfree(probe_ent2);
687
688 return 0;
689
690err_out_regions:
691 if (legacy_mode & (1 << 0))
692 release_region(0x1f0, 8);
693 if (legacy_mode & (1 << 1))
694 release_region(0x170, 8);
695 pci_release_regions(pdev);
696err_out:
697 if (disable_dev_on_err)
698 pci_disable_device(pdev);
699 return rc;
700}
701
702#endif /* CONFIG_PCI */
703
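As the kernel-doc above notes, ata_pci_init_one() is meant to be called from a driver's xxx_init_one() probe routine, much as ata_piix does earlier in this patch. A minimal, hypothetical usage sketch (example_sht and example_port_ops are assumed to be defined by the driver):

static int example_init_one(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	static struct ata_port_info info = {
		.sht		= &example_sht,		/* assumed driver template */
		.host_flags	= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= 0x1f,			/* pio0-4 */
		.port_ops	= &example_port_ops,	/* assumed driver ops */
	};
	struct ata_port_info *ppi[2] = { &info, &info };

	/* enables the device, reserves regions, sets DMA masks and
	 * registers both ports via ata_device_add() */
	return ata_pci_init_one(pdev, ppi, 2);
}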
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 4f91b0dc572b..714b42bad935 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -61,24 +61,17 @@
61 61
62#include "libata.h" 62#include "libata.h"
63 63
64static unsigned int ata_busy_sleep (struct ata_port *ap, 64static unsigned int ata_dev_init_params(struct ata_port *ap,
65 unsigned long tmout_pat, 65 struct ata_device *dev);
66 unsigned long tmout);
67static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev);
68static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev);
69static void ata_set_mode(struct ata_port *ap); 66static void ata_set_mode(struct ata_port *ap);
70static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev); 67static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
71static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift); 68static unsigned int ata_dev_xfermask(struct ata_port *ap,
72static int fgb(u32 bitmap); 69 struct ata_device *dev);
73static int ata_choose_xfer_mode(const struct ata_port *ap,
74 u8 *xfer_mode_out,
75 unsigned int *xfer_shift_out);
76static void __ata_qc_complete(struct ata_queued_cmd *qc);
77 70
78static unsigned int ata_unique_id = 1; 71static unsigned int ata_unique_id = 1;
79static struct workqueue_struct *ata_wq; 72static struct workqueue_struct *ata_wq;
80 73
81int atapi_enabled = 0; 74int atapi_enabled = 1;
82module_param(atapi_enabled, int, 0444); 75module_param(atapi_enabled, int, 0444);
83MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)"); 76MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
84 77
@@ -91,403 +84,6 @@ MODULE_DESCRIPTION("Library module for ATA devices");
91MODULE_LICENSE("GPL"); 84MODULE_LICENSE("GPL");
92MODULE_VERSION(DRV_VERSION); 85MODULE_VERSION(DRV_VERSION);
93 86
94/**
95 * ata_tf_load_pio - send taskfile registers to host controller
96 * @ap: Port to which output is sent
97 * @tf: ATA taskfile register set
98 *
99 * Outputs ATA taskfile to standard ATA host controller.
100 *
101 * LOCKING:
102 * Inherited from caller.
103 */
104
105static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
106{
107 struct ata_ioports *ioaddr = &ap->ioaddr;
108 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
109
110 if (tf->ctl != ap->last_ctl) {
111 outb(tf->ctl, ioaddr->ctl_addr);
112 ap->last_ctl = tf->ctl;
113 ata_wait_idle(ap);
114 }
115
116 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
117 outb(tf->hob_feature, ioaddr->feature_addr);
118 outb(tf->hob_nsect, ioaddr->nsect_addr);
119 outb(tf->hob_lbal, ioaddr->lbal_addr);
120 outb(tf->hob_lbam, ioaddr->lbam_addr);
121 outb(tf->hob_lbah, ioaddr->lbah_addr);
122 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
123 tf->hob_feature,
124 tf->hob_nsect,
125 tf->hob_lbal,
126 tf->hob_lbam,
127 tf->hob_lbah);
128 }
129
130 if (is_addr) {
131 outb(tf->feature, ioaddr->feature_addr);
132 outb(tf->nsect, ioaddr->nsect_addr);
133 outb(tf->lbal, ioaddr->lbal_addr);
134 outb(tf->lbam, ioaddr->lbam_addr);
135 outb(tf->lbah, ioaddr->lbah_addr);
136 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
137 tf->feature,
138 tf->nsect,
139 tf->lbal,
140 tf->lbam,
141 tf->lbah);
142 }
143
144 if (tf->flags & ATA_TFLAG_DEVICE) {
145 outb(tf->device, ioaddr->device_addr);
146 VPRINTK("device 0x%X\n", tf->device);
147 }
148
149 ata_wait_idle(ap);
150}
151
152/**
153 * ata_tf_load_mmio - send taskfile registers to host controller
154 * @ap: Port to which output is sent
155 * @tf: ATA taskfile register set
156 *
157 * Outputs ATA taskfile to standard ATA host controller using MMIO.
158 *
159 * LOCKING:
160 * Inherited from caller.
161 */
162
163static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
164{
165 struct ata_ioports *ioaddr = &ap->ioaddr;
166 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
167
168 if (tf->ctl != ap->last_ctl) {
169 writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
170 ap->last_ctl = tf->ctl;
171 ata_wait_idle(ap);
172 }
173
174 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
175 writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
176 writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
177 writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
178 writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
179 writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
180 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
181 tf->hob_feature,
182 tf->hob_nsect,
183 tf->hob_lbal,
184 tf->hob_lbam,
185 tf->hob_lbah);
186 }
187
188 if (is_addr) {
189 writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
190 writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
191 writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
192 writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
193 writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
194 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
195 tf->feature,
196 tf->nsect,
197 tf->lbal,
198 tf->lbam,
199 tf->lbah);
200 }
201
202 if (tf->flags & ATA_TFLAG_DEVICE) {
203 writeb(tf->device, (void __iomem *) ioaddr->device_addr);
204 VPRINTK("device 0x%X\n", tf->device);
205 }
206
207 ata_wait_idle(ap);
208}
209
210
211/**
212 * ata_tf_load - send taskfile registers to host controller
213 * @ap: Port to which output is sent
214 * @tf: ATA taskfile register set
215 *
216 * Outputs ATA taskfile to standard ATA host controller using MMIO
217 * or PIO as indicated by the ATA_FLAG_MMIO flag.
218 * Writes the control, feature, nsect, lbal, lbam, and lbah registers.
219 * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
220 * hob_lbal, hob_lbam, and hob_lbah.
221 *
222 * This function waits for idle (!BUSY and !DRQ) after writing
223 * registers. If the control register has a new value, this
224 * function also waits for idle after writing control and before
225 * writing the remaining registers.
226 *
227 * May be used as the tf_load() entry in ata_port_operations.
228 *
229 * LOCKING:
230 * Inherited from caller.
231 */
232void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
233{
234 if (ap->flags & ATA_FLAG_MMIO)
235 ata_tf_load_mmio(ap, tf);
236 else
237 ata_tf_load_pio(ap, tf);
238}
239
240/**
241 * ata_exec_command_pio - issue ATA command to host controller
242 * @ap: port to which command is being issued
243 * @tf: ATA taskfile register set
244 *
245 * Issues PIO write to ATA command register, with proper
246 * synchronization with interrupt handler / other threads.
247 *
248 * LOCKING:
249 * spin_lock_irqsave(host_set lock)
250 */
251
252static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
253{
254 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
255
256 outb(tf->command, ap->ioaddr.command_addr);
257 ata_pause(ap);
258}
259
260
261/**
262 * ata_exec_command_mmio - issue ATA command to host controller
263 * @ap: port to which command is being issued
264 * @tf: ATA taskfile register set
265 *
266 * Issues MMIO write to ATA command register, with proper
267 * synchronization with interrupt handler / other threads.
268 *
269 * LOCKING:
270 * spin_lock_irqsave(host_set lock)
271 */
272
273static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
274{
275 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
276
277 writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
278 ata_pause(ap);
279}
280
281
282/**
283 * ata_exec_command - issue ATA command to host controller
284 * @ap: port to which command is being issued
285 * @tf: ATA taskfile register set
286 *
287 * Issues PIO/MMIO write to ATA command register, with proper
288 * synchronization with interrupt handler / other threads.
289 *
290 * LOCKING:
291 * spin_lock_irqsave(host_set lock)
292 */
293void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
294{
295 if (ap->flags & ATA_FLAG_MMIO)
296 ata_exec_command_mmio(ap, tf);
297 else
298 ata_exec_command_pio(ap, tf);
299}
300
301/**
302 * ata_tf_to_host - issue ATA taskfile to host controller
303 * @ap: port to which command is being issued
304 * @tf: ATA taskfile register set
305 *
306 * Issues ATA taskfile register set to ATA host controller,
307 * with proper synchronization with interrupt handler and
308 * other threads.
309 *
310 * LOCKING:
311 * spin_lock_irqsave(host_set lock)
312 */
313
314static inline void ata_tf_to_host(struct ata_port *ap,
315 const struct ata_taskfile *tf)
316{
317 ap->ops->tf_load(ap, tf);
318 ap->ops->exec_command(ap, tf);
319}
320
321/**
322 * ata_tf_read_pio - input device's ATA taskfile shadow registers
323 * @ap: Port from which input is read
324 * @tf: ATA taskfile register set for storing input
325 *
326 * Reads ATA taskfile registers for currently-selected device
327 * into @tf.
328 *
329 * LOCKING:
330 * Inherited from caller.
331 */
332
333static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
334{
335 struct ata_ioports *ioaddr = &ap->ioaddr;
336
337 tf->command = ata_check_status(ap);
338 tf->feature = inb(ioaddr->error_addr);
339 tf->nsect = inb(ioaddr->nsect_addr);
340 tf->lbal = inb(ioaddr->lbal_addr);
341 tf->lbam = inb(ioaddr->lbam_addr);
342 tf->lbah = inb(ioaddr->lbah_addr);
343 tf->device = inb(ioaddr->device_addr);
344
345 if (tf->flags & ATA_TFLAG_LBA48) {
346 outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
347 tf->hob_feature = inb(ioaddr->error_addr);
348 tf->hob_nsect = inb(ioaddr->nsect_addr);
349 tf->hob_lbal = inb(ioaddr->lbal_addr);
350 tf->hob_lbam = inb(ioaddr->lbam_addr);
351 tf->hob_lbah = inb(ioaddr->lbah_addr);
352 }
353}
354
355/**
356 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
357 * @ap: Port from which input is read
358 * @tf: ATA taskfile register set for storing input
359 *
360 * Reads ATA taskfile registers for currently-selected device
361 * into @tf via MMIO.
362 *
363 * LOCKING:
364 * Inherited from caller.
365 */
366
367static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
368{
369 struct ata_ioports *ioaddr = &ap->ioaddr;
370
371 tf->command = ata_check_status(ap);
372 tf->feature = readb((void __iomem *)ioaddr->error_addr);
373 tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
374 tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
375 tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
376 tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
377 tf->device = readb((void __iomem *)ioaddr->device_addr);
378
379 if (tf->flags & ATA_TFLAG_LBA48) {
380 writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
381 tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
382 tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
383 tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
384 tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
385 tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
386 }
387}
388
389
390/**
391 * ata_tf_read - input device's ATA taskfile shadow registers
392 * @ap: Port from which input is read
393 * @tf: ATA taskfile register set for storing input
394 *
395 * Reads ATA taskfile registers for currently-selected device
396 * into @tf.
397 *
398 * Reads nsect, lbal, lbam, lbah, and device. If ATA_TFLAG_LBA48
399 * is set, also reads the hob registers.
400 *
401 * May be used as the tf_read() entry in ata_port_operations.
402 *
403 * LOCKING:
404 * Inherited from caller.
405 */
406void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
407{
408 if (ap->flags & ATA_FLAG_MMIO)
409 ata_tf_read_mmio(ap, tf);
410 else
411 ata_tf_read_pio(ap, tf);
412}
413
414/**
415 * ata_check_status_pio - Read device status reg & clear interrupt
416 * @ap: port where the device is
417 *
418 * Reads ATA taskfile status register for currently-selected device
 419 * and returns its value. This also clears pending interrupts
 420 * from this device.
421 *
422 * LOCKING:
423 * Inherited from caller.
424 */
425static u8 ata_check_status_pio(struct ata_port *ap)
426{
427 return inb(ap->ioaddr.status_addr);
428}
429
430/**
431 * ata_check_status_mmio - Read device status reg & clear interrupt
432 * @ap: port where the device is
433 *
434 * Reads ATA taskfile status register for currently-selected device
 435 * via MMIO and returns its value. This also clears pending interrupts
 436 * from this device.
437 *
438 * LOCKING:
439 * Inherited from caller.
440 */
441static u8 ata_check_status_mmio(struct ata_port *ap)
442{
443 return readb((void __iomem *) ap->ioaddr.status_addr);
444}
445
446
447/**
448 * ata_check_status - Read device status reg & clear interrupt
449 * @ap: port where the device is
450 *
451 * Reads ATA taskfile status register for currently-selected device
 452 * and returns its value. This also clears pending interrupts
 453 * from this device.
454 *
455 * May be used as the check_status() entry in ata_port_operations.
456 *
457 * LOCKING:
458 * Inherited from caller.
459 */
460u8 ata_check_status(struct ata_port *ap)
461{
462 if (ap->flags & ATA_FLAG_MMIO)
463 return ata_check_status_mmio(ap);
464 return ata_check_status_pio(ap);
465}
466
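
As a rough usage sketch (not taken from this patch; "my_drv_ops" is a hypothetical name and a real driver fills in many more hooks), a low-level driver with no special taskfile handling would typically wire the generic PIO/MMIO helpers above straight into its ata_port_operations:

	static const struct ata_port_operations my_drv_ops = {
		.tf_load	= ata_tf_load,		/* PIO/MMIO dispatch shown above */
		.tf_read	= ata_tf_read,
		.exec_command	= ata_exec_command,
		.check_status	= ata_check_status,
	};
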
467
468/**
469 * ata_altstatus - Read device alternate status reg
470 * @ap: port where the device is
471 *
472 * Reads ATA taskfile alternate status register for
473 * currently-selected device and return its value.
474 *
475 * Note: may NOT be used as the check_altstatus() entry in
476 * ata_port_operations.
477 *
478 * LOCKING:
479 * Inherited from caller.
480 */
481u8 ata_altstatus(struct ata_port *ap)
482{
483 if (ap->ops->check_altstatus)
484 return ap->ops->check_altstatus(ap);
485
486 if (ap->flags & ATA_FLAG_MMIO)
487 return readb((void __iomem *)ap->ioaddr.altstatus_addr);
488 return inb(ap->ioaddr.altstatus_addr);
489}
490
491 87
492/** 88/**
493 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure 89 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
@@ -632,58 +228,148 @@ int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
632 return -1; 228 return -1;
633} 229}
634 230
635static const char * const xfer_mode_str[] = { 231/**
636 "UDMA/16", 232 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
637 "UDMA/25", 233 * @pio_mask: pio_mask
638 "UDMA/33", 234 * @mwdma_mask: mwdma_mask
639 "UDMA/44", 235 * @udma_mask: udma_mask
640 "UDMA/66", 236 *
641 "UDMA/100", 237 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
642 "UDMA/133", 238 * unsigned int xfer_mask.
643 "UDMA7", 239 *
644 "MWDMA0", 240 * LOCKING:
645 "MWDMA1", 241 * None.
646 "MWDMA2", 242 *
647 "PIO0", 243 * RETURNS:
648 "PIO1", 244 * Packed xfer_mask.
649 "PIO2", 245 */
650 "PIO3", 246static unsigned int ata_pack_xfermask(unsigned int pio_mask,
651 "PIO4", 247 unsigned int mwdma_mask,
248 unsigned int udma_mask)
249{
250 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
251 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
252 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
253}
254
255static const struct ata_xfer_ent {
256 unsigned int shift, bits;
257 u8 base;
258} ata_xfer_tbl[] = {
259 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
260 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
261 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
262 { -1, },
652}; 263};
653 264
654/** 265/**
655 * ata_udma_string - convert UDMA bit offset to string 266 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
656 * @mask: mask of bits supported; only highest bit counts. 267 * @xfer_mask: xfer_mask of interest
657 * 268 *
658 * Determine string which represents the highest speed 269 * Return matching XFER_* value for @xfer_mask. Only the highest
659 * (highest bit in @udma_mask). 270 * bit of @xfer_mask is considered.
660 * 271 *
661 * LOCKING: 272 * LOCKING:
662 * None. 273 * None.
663 * 274 *
664 * RETURNS: 275 * RETURNS:
665 * Constant C string representing highest speed listed in 276 * Matching XFER_* value, 0 if no match found.
666 * @udma_mask, or the constant C string "<n/a>".
667 */ 277 */
278static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
279{
280 int highbit = fls(xfer_mask) - 1;
281 const struct ata_xfer_ent *ent;
668 282
669static const char *ata_mode_string(unsigned int mask) 283 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
284 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
285 return ent->base + highbit - ent->shift;
286 return 0;
287}
288
289/**
290 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
291 * @xfer_mode: XFER_* of interest
292 *
293 * Return matching xfer_mask for @xfer_mode.
294 *
295 * LOCKING:
296 * None.
297 *
298 * RETURNS:
299 * Matching xfer_mask, 0 if no match found.
300 */
301static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
670{ 302{
671 int i; 303 const struct ata_xfer_ent *ent;
672 304
673 for (i = 7; i >= 0; i--) 305 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
674 if (mask & (1 << i)) 306 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
675 goto out; 307 return 1 << (ent->shift + xfer_mode - ent->base);
676 for (i = ATA_SHIFT_MWDMA + 2; i >= ATA_SHIFT_MWDMA; i--) 308 return 0;
677 if (mask & (1 << i)) 309}
678 goto out;
679 for (i = ATA_SHIFT_PIO + 4; i >= ATA_SHIFT_PIO; i--)
680 if (mask & (1 << i))
681 goto out;
682 310
683 return "<n/a>"; 311/**
312 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
313 * @xfer_mode: XFER_* of interest
314 *
315 * Return matching xfer_shift for @xfer_mode.
316 *
317 * LOCKING:
318 * None.
319 *
320 * RETURNS:
321 * Matching xfer_shift, -1 if no match found.
322 */
323static int ata_xfer_mode2shift(unsigned int xfer_mode)
324{
325 const struct ata_xfer_ent *ent;
684 326
685out: 327 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
686 return xfer_mode_str[i]; 328 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
329 return ent->shift;
330 return -1;
331}
332
333/**
334 * ata_mode_string - convert xfer_mask to string
335 * @xfer_mask: mask of bits supported; only highest bit counts.
336 *
337 * Determine string which represents the highest speed
 338 * (highest bit in @xfer_mask).
339 *
340 * LOCKING:
341 * None.
342 *
343 * RETURNS:
344 * Constant C string representing highest speed listed in
 345 * @xfer_mask, or the constant C string "<n/a>".
346 */
347static const char *ata_mode_string(unsigned int xfer_mask)
348{
349 static const char * const xfer_mode_str[] = {
350 "PIO0",
351 "PIO1",
352 "PIO2",
353 "PIO3",
354 "PIO4",
355 "MWDMA0",
356 "MWDMA1",
357 "MWDMA2",
358 "UDMA/16",
359 "UDMA/25",
360 "UDMA/33",
361 "UDMA/44",
362 "UDMA/66",
363 "UDMA/100",
364 "UDMA/133",
365 "UDMA7",
366 };
367 int highbit;
368
369 highbit = fls(xfer_mask) - 1;
370 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
371 return xfer_mode_str[highbit];
372 return "<n/a>";
687} 373}
688 374
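
A rough worked example of how the new xfer_mask helpers compose, assuming the usual bit layout of ATA_SHIFT_PIO/ATA_SHIFT_MWDMA/ATA_SHIFT_UDMA (0/5/8); the mask values are illustrative, not quoted from this hunk:

	/* device advertising PIO0-4, MWDMA0-2, UDMA0-5 */
	unsigned int xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
	u8 mode = ata_xfer_mask2mode(xfer_mask);	/* highest bit -> XFER_UDMA_5 */
	const char *s = ata_mode_string(xfer_mask);	/* -> "UDMA/100" */
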
689/** 375/**
@@ -838,6 +524,7 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf)
838 * ata_dev_try_classify - Parse returned ATA device signature 524 * ata_dev_try_classify - Parse returned ATA device signature
839 * @ap: ATA channel to examine 525 * @ap: ATA channel to examine
840 * @device: Device to examine (starting at zero) 526 * @device: Device to examine (starting at zero)
527 * @r_err: Value of error register on completion
841 * 528 *
842 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs, 529 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
843 * an ATA/ATAPI-defined set of values is placed in the ATA 530 * an ATA/ATAPI-defined set of values is placed in the ATA
@@ -850,11 +537,14 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf)
850 * 537 *
851 * LOCKING: 538 * LOCKING:
852 * caller. 539 * caller.
540 *
541 * RETURNS:
542 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
853 */ 543 */
854 544
855static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device) 545static unsigned int
546ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
856{ 547{
857 struct ata_device *dev = &ap->device[device];
858 struct ata_taskfile tf; 548 struct ata_taskfile tf;
859 unsigned int class; 549 unsigned int class;
860 u8 err; 550 u8 err;
@@ -865,8 +555,8 @@ static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
865 555
866 ap->ops->tf_read(ap, &tf); 556 ap->ops->tf_read(ap, &tf);
867 err = tf.feature; 557 err = tf.feature;
868 558 if (r_err)
869 dev->class = ATA_DEV_NONE; 559 *r_err = err;
870 560
871 /* see if device passed diags */ 561 /* see if device passed diags */
872 if (err == 1) 562 if (err == 1)
@@ -874,22 +564,20 @@ static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
874 else if ((device == 0) && (err == 0x81)) 564 else if ((device == 0) && (err == 0x81))
875 /* do nothing */ ; 565 /* do nothing */ ;
876 else 566 else
877 return err; 567 return ATA_DEV_NONE;
878 568
879 /* determine if device if ATA or ATAPI */ 569 /* determine if device is ATA or ATAPI */
880 class = ata_dev_classify(&tf); 570 class = ata_dev_classify(&tf);
571
881 if (class == ATA_DEV_UNKNOWN) 572 if (class == ATA_DEV_UNKNOWN)
882 return err; 573 return ATA_DEV_NONE;
883 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0)) 574 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
884 return err; 575 return ATA_DEV_NONE;
885 576 return class;
886 dev->class = class;
887
888 return err;
889} 577}
890 578
891/** 579/**
892 * ata_dev_id_string - Convert IDENTIFY DEVICE page into string 580 * ata_id_string - Convert IDENTIFY DEVICE page into string
893 * @id: IDENTIFY DEVICE results we will examine 581 * @id: IDENTIFY DEVICE results we will examine
894 * @s: string into which data is output 582 * @s: string into which data is output
895 * @ofs: offset into identify device page 583 * @ofs: offset into identify device page
@@ -903,8 +591,8 @@ static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
903 * caller. 591 * caller.
904 */ 592 */
905 593
906void ata_dev_id_string(const u16 *id, unsigned char *s, 594void ata_id_string(const u16 *id, unsigned char *s,
907 unsigned int ofs, unsigned int len) 595 unsigned int ofs, unsigned int len)
908{ 596{
909 unsigned int c; 597 unsigned int c;
910 598
@@ -922,6 +610,49 @@ void ata_dev_id_string(const u16 *id, unsigned char *s,
922 } 610 }
923} 611}
924 612
613/**
614 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
615 * @id: IDENTIFY DEVICE results we will examine
616 * @s: string into which data is output
617 * @ofs: offset into identify device page
 618 * @len: length of string to return. Must be an odd number.
619 *
620 * This function is identical to ata_id_string except that it
621 * trims trailing spaces and terminates the resulting string with
622 * null. @len must be actual maximum length (even number) + 1.
623 *
624 * LOCKING:
625 * caller.
626 */
627void ata_id_c_string(const u16 *id, unsigned char *s,
628 unsigned int ofs, unsigned int len)
629{
630 unsigned char *p;
631
632 WARN_ON(!(len & 1));
633
634 ata_id_string(id, s, ofs, len - 1);
635
636 p = s + strnlen(s, len - 1);
637 while (p > s && p[-1] == ' ')
638 p--;
639 *p = '\0';
640}
641
642static u64 ata_id_n_sectors(const u16 *id)
643{
644 if (ata_id_has_lba(id)) {
645 if (ata_id_has_lba48(id))
646 return ata_id_u64(id, 100);
647 else
648 return ata_id_u32(id, 60);
649 } else {
650 if (ata_id_current_chs_valid(id))
651 return ata_id_u32(id, 57);
652 else
653 return id[1] * id[3] * id[6];
654 }
655}
925 656
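
A hedged usage sketch for the new ata_id_c_string() helper (offsets follow the ATA IDENTIFY layout, where words 27-46 hold the model number; the buffer length must be odd so the helper can trim spaces and NUL-terminate):

	unsigned char model[41];	/* 40-byte model field + terminating NUL */

	ata_id_c_string(id, model, 27, sizeof(model));
	printk(KERN_INFO "model: %s\n", model);
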
926/** 657/**
927 * ata_noop_dev_select - Select device 0/1 on ATA bus 658 * ata_noop_dev_select - Select device 0/1 on ATA bus
@@ -1011,90 +742,172 @@ void ata_dev_select(struct ata_port *ap, unsigned int device,
1011 742
1012/** 743/**
1013 * ata_dump_id - IDENTIFY DEVICE info debugging output 744 * ata_dump_id - IDENTIFY DEVICE info debugging output
1014 * @dev: Device whose IDENTIFY DEVICE page we will dump 745 * @id: IDENTIFY DEVICE page to dump
1015 * 746 *
1016 * Dump selected 16-bit words from a detected device's 747 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1017 * IDENTIFY PAGE page. 748 * page.
1018 * 749 *
1019 * LOCKING: 750 * LOCKING:
1020 * caller. 751 * caller.
1021 */ 752 */
1022 753
1023static inline void ata_dump_id(const struct ata_device *dev) 754static inline void ata_dump_id(const u16 *id)
1024{ 755{
1025 DPRINTK("49==0x%04x " 756 DPRINTK("49==0x%04x "
1026 "53==0x%04x " 757 "53==0x%04x "
1027 "63==0x%04x " 758 "63==0x%04x "
1028 "64==0x%04x " 759 "64==0x%04x "
1029 "75==0x%04x \n", 760 "75==0x%04x \n",
1030 dev->id[49], 761 id[49],
1031 dev->id[53], 762 id[53],
1032 dev->id[63], 763 id[63],
1033 dev->id[64], 764 id[64],
1034 dev->id[75]); 765 id[75]);
1035 DPRINTK("80==0x%04x " 766 DPRINTK("80==0x%04x "
1036 "81==0x%04x " 767 "81==0x%04x "
1037 "82==0x%04x " 768 "82==0x%04x "
1038 "83==0x%04x " 769 "83==0x%04x "
1039 "84==0x%04x \n", 770 "84==0x%04x \n",
1040 dev->id[80], 771 id[80],
1041 dev->id[81], 772 id[81],
1042 dev->id[82], 773 id[82],
1043 dev->id[83], 774 id[83],
1044 dev->id[84]); 775 id[84]);
1045 DPRINTK("88==0x%04x " 776 DPRINTK("88==0x%04x "
1046 "93==0x%04x\n", 777 "93==0x%04x\n",
1047 dev->id[88], 778 id[88],
1048 dev->id[93]); 779 id[93]);
1049} 780}
1050 781
1051/* 782/**
1052 * Compute the PIO modes available for this device. This is not as 783 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1053 * trivial as it seems if we must consider early devices correctly. 784 * @id: IDENTIFY data to compute xfer mask from
785 *
786 * Compute the xfermask for this device. This is not as trivial
787 * as it seems if we must consider early devices correctly.
788 *
789 * FIXME: pre IDE drive timing (do we care ?).
790 *
791 * LOCKING:
792 * None.
1054 * 793 *
1055 * FIXME: pre IDE drive timing (do we care ?). 794 * RETURNS:
795 * Computed xfermask
1056 */ 796 */
1057 797static unsigned int ata_id_xfermask(const u16 *id)
1058static unsigned int ata_pio_modes(const struct ata_device *adev)
1059{ 798{
1060 u16 modes; 799 unsigned int pio_mask, mwdma_mask, udma_mask;
1061 800
1062 /* Usual case. Word 53 indicates word 64 is valid */ 801 /* Usual case. Word 53 indicates word 64 is valid */
1063 if (adev->id[ATA_ID_FIELD_VALID] & (1 << 1)) { 802 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1064 modes = adev->id[ATA_ID_PIO_MODES] & 0x03; 803 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1065 modes <<= 3; 804 pio_mask <<= 3;
1066 modes |= 0x7; 805 pio_mask |= 0x7;
1067 return modes; 806 } else {
807 /* If word 64 isn't valid then Word 51 high byte holds
808 * the PIO timing number for the maximum. Turn it into
809 * a mask.
810 */
 811 pio_mask = (2 << ((id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF)) - 1;
812
813 /* But wait.. there's more. Design your standards by
814 * committee and you too can get a free iordy field to
 815 * process. However it's the speeds, not the modes, that
 816 * are supported... Note drivers using the timing API
 817 * will get this right anyway.
818 */
1068 } 819 }
1069 820
1070 /* If word 64 isn't valid then Word 51 high byte holds the PIO timing 821 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1071 number for the maximum. Turn it into a mask and return it */ 822
1072 modes = (2 << ((adev->id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF)) - 1 ; 823 udma_mask = 0;
1073 return modes; 824 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1074 /* But wait.. there's more. Design your standards by committee and 825 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1075 you too can get a free iordy field to process. However its the 826
1076 speeds not the modes that are supported... Note drivers using the 827 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1077 timing API will get this right anyway */
1078} 828}
1079 829
1080struct ata_exec_internal_arg { 830/**
1081 unsigned int err_mask; 831 * ata_port_queue_task - Queue port_task
1082 struct ata_taskfile *tf; 832 * @ap: The ata_port to queue port_task for
1083 struct completion *waiting; 833 *
1084}; 834 * Schedule @fn(@data) for execution after @delay jiffies using
835 * port_task. There is one port_task per port and it's the
836 * user(low level driver)'s responsibility to make sure that only
837 * one task is active at any given time.
838 *
839 * libata core layer takes care of synchronization between
840 * port_task and EH. ata_port_queue_task() may be ignored for EH
841 * synchronization.
842 *
843 * LOCKING:
844 * Inherited from caller.
845 */
846void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
847 unsigned long delay)
848{
849 int rc;
850
851 if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
852 return;
853
854 PREPARE_WORK(&ap->port_task, fn, data);
855
856 if (!delay)
857 rc = queue_work(ata_wq, &ap->port_task);
858 else
859 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
860
861 /* rc == 0 means that another user is using port task */
862 WARN_ON(rc == 0);
863}
1085 864
1086int ata_qc_complete_internal(struct ata_queued_cmd *qc) 865/**
866 * ata_port_flush_task - Flush port_task
867 * @ap: The ata_port to flush port_task for
868 *
 869 * After this function completes, port_task is guaranteed not to
870 * be running or scheduled.
871 *
872 * LOCKING:
873 * Kernel thread context (may sleep)
874 */
875void ata_port_flush_task(struct ata_port *ap)
1087{ 876{
1088 struct ata_exec_internal_arg *arg = qc->private_data; 877 unsigned long flags;
1089 struct completion *waiting = arg->waiting;
1090 878
1091 if (!(qc->err_mask & ~AC_ERR_DEV)) 879 DPRINTK("ENTER\n");
1092 qc->ap->ops->tf_read(qc->ap, arg->tf);
1093 arg->err_mask = qc->err_mask;
1094 arg->waiting = NULL;
1095 complete(waiting);
1096 880
1097 return 0; 881 spin_lock_irqsave(&ap->host_set->lock, flags);
882 ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
883 spin_unlock_irqrestore(&ap->host_set->lock, flags);
884
885 DPRINTK("flush #1\n");
886 flush_workqueue(ata_wq);
887
888 /*
889 * At this point, if a task is running, it's guaranteed to see
890 * the FLUSH flag; thus, it will never queue pio tasks again.
891 * Cancel and flush.
892 */
893 if (!cancel_delayed_work(&ap->port_task)) {
894 DPRINTK("flush #2\n");
895 flush_workqueue(ata_wq);
896 }
897
898 spin_lock_irqsave(&ap->host_set->lock, flags);
899 ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
900 spin_unlock_irqrestore(&ap->host_set->lock, flags);
901
902 DPRINTK("EXIT\n");
903}
904
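
A minimal sketch of how a low-level driver might use the new port_task interface; "my_pio_poll" is a hypothetical callback and the 10 ms delay is arbitrary. Only one task may be outstanding per port, and EH quiesces it via ata_port_flush_task():

	static void my_pio_poll(void *data)
	{
		struct ata_port *ap = data;

		/* check BSY/DRQ here; if the device is still busy, requeue */
		ata_port_queue_task(ap, my_pio_poll, ap, msecs_to_jiffies(10));
	}
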
905void ata_qc_complete_internal(struct ata_queued_cmd *qc)
906{
907 struct completion *waiting = qc->private_data;
908
909 qc->ap->ops->tf_read(qc->ap, &qc->tf);
910 complete(waiting);
1098} 911}
1099 912
1100/** 913/**
@@ -1125,7 +938,7 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
1125 struct ata_queued_cmd *qc; 938 struct ata_queued_cmd *qc;
1126 DECLARE_COMPLETION(wait); 939 DECLARE_COMPLETION(wait);
1127 unsigned long flags; 940 unsigned long flags;
1128 struct ata_exec_internal_arg arg; 941 unsigned int err_mask;
1129 942
1130 spin_lock_irqsave(&ap->host_set->lock, flags); 943 spin_lock_irqsave(&ap->host_set->lock, flags);
1131 944
@@ -1139,13 +952,12 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
1139 qc->nsect = buflen / ATA_SECT_SIZE; 952 qc->nsect = buflen / ATA_SECT_SIZE;
1140 } 953 }
1141 954
1142 arg.waiting = &wait; 955 qc->private_data = &wait;
1143 arg.tf = tf;
1144 qc->private_data = &arg;
1145 qc->complete_fn = ata_qc_complete_internal; 956 qc->complete_fn = ata_qc_complete_internal;
1146 957
1147 if (ata_qc_issue(qc)) 958 qc->err_mask = ata_qc_issue(qc);
1148 goto issue_fail; 959 if (qc->err_mask)
960 ata_qc_complete(qc);
1149 961
1150 spin_unlock_irqrestore(&ap->host_set->lock, flags); 962 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1151 963
@@ -1158,8 +970,8 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
1158 * before the caller cleans up, it will result in a 970 * before the caller cleans up, it will result in a
1159 * spurious interrupt. We can live with that. 971 * spurious interrupt. We can live with that.
1160 */ 972 */
1161 if (arg.waiting) { 973 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1162 qc->err_mask = AC_ERR_OTHER; 974 qc->err_mask = AC_ERR_TIMEOUT;
1163 ata_qc_complete(qc); 975 ata_qc_complete(qc);
1164 printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n", 976 printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
1165 ap->id, command); 977 ap->id, command);
@@ -1168,12 +980,12 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
1168 spin_unlock_irqrestore(&ap->host_set->lock, flags); 980 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1169 } 981 }
1170 982
1171 return arg.err_mask; 983 *tf = qc->tf;
984 err_mask = qc->err_mask;
1172 985
1173 issue_fail:
1174 ata_qc_free(qc); 986 ata_qc_free(qc);
1175 spin_unlock_irqrestore(&ap->host_set->lock, flags); 987
1176 return AC_ERR_OTHER; 988 return err_mask;
1177} 989}
1178 990
1179/** 991/**
@@ -1210,73 +1022,78 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1210} 1022}
1211 1023
1212/** 1024/**
1213 * ata_dev_identify - obtain IDENTIFY x DEVICE page 1025 * ata_dev_read_id - Read ID data from the specified device
1214 * @ap: port on which device we wish to probe resides 1026 * @ap: port on which target device resides
1215 * @device: device bus address, starting at zero 1027 * @dev: target device
1216 * 1028 * @p_class: pointer to class of the target device (may be changed)
1217 * Following bus reset, we issue the IDENTIFY [PACKET] DEVICE 1029 * @post_reset: is this read ID post-reset?
1218 * command, and read back the 512-byte device information page. 1030 * @p_id: read IDENTIFY page (newly allocated)
1219 * The device information page is fed to us via the standard 1031 *
1220 * PIO-IN protocol, but we hand-code it here. (TODO: investigate 1032 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1221 * using standard PIO-IN paths) 1033 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1222 * 1034 * devices. This function also takes care of EDD signature
1223 * After reading the device information page, we use several 1035 * misreporting (to be removed once EDD support is gone) and
1224 * bits of information from it to initialize data structures 1036 * issues ATA_CMD_INIT_DEV_PARAMS for pre-ATA4 drives.
1225 * that will be used during the lifetime of the ata_device.
1226 * Other data from the info page is used to disqualify certain
1227 * older ATA devices we do not wish to support.
1228 * 1037 *
1229 * LOCKING: 1038 * LOCKING:
1230 * Inherited from caller. Some functions called by this function 1039 * Kernel thread context (may sleep)
1231 * obtain the host_set lock. 1040 *
1041 * RETURNS:
1042 * 0 on success, -errno otherwise.
1232 */ 1043 */
1233 1044static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
1234static void ata_dev_identify(struct ata_port *ap, unsigned int device) 1045 unsigned int *p_class, int post_reset, u16 **p_id)
1235{ 1046{
1236 struct ata_device *dev = &ap->device[device]; 1047 unsigned int class = *p_class;
1237 unsigned int major_version;
1238 u16 tmp;
1239 unsigned long xfer_modes;
1240 unsigned int using_edd; 1048 unsigned int using_edd;
1241 struct ata_taskfile tf; 1049 struct ata_taskfile tf;
1242 unsigned int err_mask; 1050 unsigned int err_mask = 0;
1051 u16 *id;
1052 const char *reason;
1243 int rc; 1053 int rc;
1244 1054
1245 if (!ata_dev_present(dev)) { 1055 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
1246 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
1247 ap->id, device);
1248 return;
1249 }
1250 1056
1251 if (ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET)) 1057 if (ap->ops->probe_reset ||
1058 ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
1252 using_edd = 0; 1059 using_edd = 0;
1253 else 1060 else
1254 using_edd = 1; 1061 using_edd = 1;
1255 1062
1256 DPRINTK("ENTER, host %u, dev %u\n", ap->id, device); 1063 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1257
1258 assert (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI ||
1259 dev->class == ATA_DEV_NONE);
1260 1064
1261 ata_dev_select(ap, device, 1, 1); /* select device 0/1 */ 1065 id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
1066 if (id == NULL) {
1067 rc = -ENOMEM;
1068 reason = "out of memory";
1069 goto err_out;
1070 }
1262 1071
1263retry: 1072 retry:
1264 ata_tf_init(ap, &tf, device); 1073 ata_tf_init(ap, &tf, dev->devno);
1265 1074
1266 if (dev->class == ATA_DEV_ATA) { 1075 switch (class) {
1076 case ATA_DEV_ATA:
1267 tf.command = ATA_CMD_ID_ATA; 1077 tf.command = ATA_CMD_ID_ATA;
1268 DPRINTK("do ATA identify\n"); 1078 break;
1269 } else { 1079 case ATA_DEV_ATAPI:
1270 tf.command = ATA_CMD_ID_ATAPI; 1080 tf.command = ATA_CMD_ID_ATAPI;
1271 DPRINTK("do ATAPI identify\n"); 1081 break;
1082 default:
1083 rc = -ENODEV;
1084 reason = "unsupported class";
1085 goto err_out;
1272 } 1086 }
1273 1087
1274 tf.protocol = ATA_PROT_PIO; 1088 tf.protocol = ATA_PROT_PIO;
1275 1089
1276 err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE, 1090 err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
1277 dev->id, sizeof(dev->id)); 1091 id, sizeof(id[0]) * ATA_ID_WORDS);
1278 1092
1279 if (err_mask) { 1093 if (err_mask) {
1094 rc = -EIO;
1095 reason = "I/O error";
1096
1280 if (err_mask & ~AC_ERR_DEV) 1097 if (err_mask & ~AC_ERR_DEV)
1281 goto err_out; 1098 goto err_out;
1282 1099
@@ -1291,180 +1108,223 @@ retry:
1291 * ATA software reset (SRST, the default) does not appear 1108 * ATA software reset (SRST, the default) does not appear
1292 * to have this problem. 1109 * to have this problem.
1293 */ 1110 */
1294 if ((using_edd) && (dev->class == ATA_DEV_ATA)) { 1111 if ((using_edd) && (class == ATA_DEV_ATA)) {
1295 u8 err = tf.feature; 1112 u8 err = tf.feature;
1296 if (err & ATA_ABORTED) { 1113 if (err & ATA_ABORTED) {
1297 dev->class = ATA_DEV_ATAPI; 1114 class = ATA_DEV_ATAPI;
1298 goto retry; 1115 goto retry;
1299 } 1116 }
1300 } 1117 }
1301 goto err_out; 1118 goto err_out;
1302 } 1119 }
1303 1120
1304 swap_buf_le16(dev->id, ATA_ID_WORDS); 1121 swap_buf_le16(id, ATA_ID_WORDS);
1122
1123 /* sanity check */
1124 if ((class == ATA_DEV_ATA) != ata_id_is_ata(id)) {
1125 rc = -EINVAL;
1126 reason = "device reports illegal type";
1127 goto err_out;
1128 }
1129
1130 if (post_reset && class == ATA_DEV_ATA) {
1131 /*
1132 * The exact sequence expected by certain pre-ATA4 drives is:
1133 * SRST RESET
1134 * IDENTIFY
1135 * INITIALIZE DEVICE PARAMETERS
1136 * anything else..
1137 * Some drives were very specific about that exact sequence.
1138 */
1139 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1140 err_mask = ata_dev_init_params(ap, dev);
1141 if (err_mask) {
1142 rc = -EIO;
1143 reason = "INIT_DEV_PARAMS failed";
1144 goto err_out;
1145 }
1146
1147 /* current CHS translation info (id[53-58]) might be
1148 * changed. reread the identify device info.
1149 */
1150 post_reset = 0;
1151 goto retry;
1152 }
1153 }
1154
1155 *p_class = class;
1156 *p_id = id;
1157 return 0;
1158
1159 err_out:
1160 printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n",
1161 ap->id, dev->devno, reason);
1162 kfree(id);
1163 return rc;
1164}
1165
1166static inline u8 ata_dev_knobble(const struct ata_port *ap,
1167 struct ata_device *dev)
1168{
1169 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1170}
1171
1172/**
1173 * ata_dev_configure - Configure the specified ATA/ATAPI device
1174 * @ap: Port on which target device resides
1175 * @dev: Target device to configure
1176 * @print_info: Enable device info printout
1177 *
1178 * Configure @dev according to @dev->id. Generic and low-level
1179 * driver specific fixups are also applied.
1180 *
1181 * LOCKING:
1182 * Kernel thread context (may sleep)
1183 *
1184 * RETURNS:
1185 * 0 on success, -errno otherwise
1186 */
1187static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1188 int print_info)
1189{
1190 const u16 *id = dev->id;
1191 unsigned int xfer_mask;
1192 int i, rc;
1193
1194 if (!ata_dev_present(dev)) {
1195 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
1196 ap->id, dev->devno);
1197 return 0;
1198 }
1199
1200 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
1305 1201
1306 /* print device capabilities */ 1202 /* print device capabilities */
1307 printk(KERN_DEBUG "ata%u: dev %u cfg " 1203 if (print_info)
1308 "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n", 1204 printk(KERN_DEBUG "ata%u: dev %u cfg 49:%04x 82:%04x 83:%04x "
1309 ap->id, device, dev->id[49], 1205 "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1310 dev->id[82], dev->id[83], dev->id[84], 1206 ap->id, dev->devno, id[49], id[82], id[83],
1311 dev->id[85], dev->id[86], dev->id[87], 1207 id[84], id[85], id[86], id[87], id[88]);
1312 dev->id[88]); 1208
1209 /* initialize to-be-configured parameters */
1210 dev->flags = 0;
1211 dev->max_sectors = 0;
1212 dev->cdb_len = 0;
1213 dev->n_sectors = 0;
1214 dev->cylinders = 0;
1215 dev->heads = 0;
1216 dev->sectors = 0;
1313 1217
1314 /* 1218 /*
1315 * common ATA, ATAPI feature tests 1219 * common ATA, ATAPI feature tests
1316 */ 1220 */
1317 1221
1318 /* we require DMA support (bits 8 of word 49) */ 1222 /* we require DMA support (bits 8 of word 49) */
1319 if (!ata_id_has_dma(dev->id)) { 1223 if (!ata_id_has_dma(id)) {
1320 printk(KERN_DEBUG "ata%u: no dma\n", ap->id); 1224 printk(KERN_DEBUG "ata%u: no dma\n", ap->id);
1225 rc = -EINVAL;
1321 goto err_out_nosup; 1226 goto err_out_nosup;
1322 } 1227 }
1323 1228
1324 /* quick-n-dirty find max transfer mode; for printk only */ 1229 /* find max transfer mode; for printk only */
1325 xfer_modes = dev->id[ATA_ID_UDMA_MODES]; 1230 xfer_mask = ata_id_xfermask(id);
1326 if (!xfer_modes)
1327 xfer_modes = (dev->id[ATA_ID_MWDMA_MODES]) << ATA_SHIFT_MWDMA;
1328 if (!xfer_modes)
1329 xfer_modes = ata_pio_modes(dev);
1330 1231
1331 ata_dump_id(dev); 1232 ata_dump_id(id);
1332 1233
1333 /* ATA-specific feature tests */ 1234 /* ATA-specific feature tests */
1334 if (dev->class == ATA_DEV_ATA) { 1235 if (dev->class == ATA_DEV_ATA) {
1335 if (!ata_id_is_ata(dev->id)) /* sanity check */ 1236 dev->n_sectors = ata_id_n_sectors(id);
1336 goto err_out_nosup;
1337 1237
1338 /* get major version */ 1238 if (ata_id_has_lba(id)) {
1339 tmp = dev->id[ATA_ID_MAJOR_VER]; 1239 const char *lba_desc;
1340 for (major_version = 14; major_version >= 1; major_version--)
1341 if (tmp & (1 << major_version))
1342 break;
1343 1240
1344 /* 1241 lba_desc = "LBA";
1345 * The exact sequence expected by certain pre-ATA4 drives is:
1346 * SRST RESET
1347 * IDENTIFY
1348 * INITIALIZE DEVICE PARAMETERS
1349 * anything else..
1350 * Some drives were very specific about that exact sequence.
1351 */
1352 if (major_version < 4 || (!ata_id_has_lba(dev->id))) {
1353 ata_dev_init_params(ap, dev);
1354
1355 /* current CHS translation info (id[53-58]) might be
1356 * changed. reread the identify device info.
1357 */
1358 ata_dev_reread_id(ap, dev);
1359 }
1360
1361 if (ata_id_has_lba(dev->id)) {
1362 dev->flags |= ATA_DFLAG_LBA; 1242 dev->flags |= ATA_DFLAG_LBA;
1363 1243 if (ata_id_has_lba48(id)) {
1364 if (ata_id_has_lba48(dev->id)) {
1365 dev->flags |= ATA_DFLAG_LBA48; 1244 dev->flags |= ATA_DFLAG_LBA48;
1366 dev->n_sectors = ata_id_u64(dev->id, 100); 1245 lba_desc = "LBA48";
1367 } else {
1368 dev->n_sectors = ata_id_u32(dev->id, 60);
1369 } 1246 }
1370 1247
1371 /* print device info to dmesg */ 1248 /* print device info to dmesg */
1372 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors:%s\n", 1249 if (print_info)
1373 ap->id, device, 1250 printk(KERN_INFO "ata%u: dev %u ATA-%d, "
1374 major_version, 1251 "max %s, %Lu sectors: %s\n",
1375 ata_mode_string(xfer_modes), 1252 ap->id, dev->devno,
1376 (unsigned long long)dev->n_sectors, 1253 ata_id_major_version(id),
1377 dev->flags & ATA_DFLAG_LBA48 ? " LBA48" : " LBA"); 1254 ata_mode_string(xfer_mask),
1378 } else { 1255 (unsigned long long)dev->n_sectors,
1256 lba_desc);
1257 } else {
1379 /* CHS */ 1258 /* CHS */
1380 1259
1381 /* Default translation */ 1260 /* Default translation */
1382 dev->cylinders = dev->id[1]; 1261 dev->cylinders = id[1];
1383 dev->heads = dev->id[3]; 1262 dev->heads = id[3];
1384 dev->sectors = dev->id[6]; 1263 dev->sectors = id[6];
1385 dev->n_sectors = dev->cylinders * dev->heads * dev->sectors;
1386 1264
1387 if (ata_id_current_chs_valid(dev->id)) { 1265 if (ata_id_current_chs_valid(id)) {
1388 /* Current CHS translation is valid. */ 1266 /* Current CHS translation is valid. */
1389 dev->cylinders = dev->id[54]; 1267 dev->cylinders = id[54];
1390 dev->heads = dev->id[55]; 1268 dev->heads = id[55];
1391 dev->sectors = dev->id[56]; 1269 dev->sectors = id[56];
1392
1393 dev->n_sectors = ata_id_u32(dev->id, 57);
1394 } 1270 }
1395 1271
1396 /* print device info to dmesg */ 1272 /* print device info to dmesg */
1397 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors: CHS %d/%d/%d\n", 1273 if (print_info)
1398 ap->id, device, 1274 printk(KERN_INFO "ata%u: dev %u ATA-%d, "
1399 major_version, 1275 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1400 ata_mode_string(xfer_modes), 1276 ap->id, dev->devno,
1401 (unsigned long long)dev->n_sectors, 1277 ata_id_major_version(id),
1402 (int)dev->cylinders, (int)dev->heads, (int)dev->sectors); 1278 ata_mode_string(xfer_mask),
1403 1279 (unsigned long long)dev->n_sectors,
1280 dev->cylinders, dev->heads, dev->sectors);
1404 } 1281 }
1405 1282
1406 ap->host->max_cmd_len = 16; 1283 dev->cdb_len = 16;
1407 } 1284 }
1408 1285
1409 /* ATAPI-specific feature tests */ 1286 /* ATAPI-specific feature tests */
1410 else if (dev->class == ATA_DEV_ATAPI) { 1287 else if (dev->class == ATA_DEV_ATAPI) {
1411 if (ata_id_is_ata(dev->id)) /* sanity check */ 1288 rc = atapi_cdb_len(id);
1412 goto err_out_nosup;
1413
1414 rc = atapi_cdb_len(dev->id);
1415 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { 1289 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1416 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id); 1290 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
1291 rc = -EINVAL;
1417 goto err_out_nosup; 1292 goto err_out_nosup;
1418 } 1293 }
1419 ap->cdb_len = (unsigned int) rc; 1294 dev->cdb_len = (unsigned int) rc;
1420 ap->host->max_cmd_len = (unsigned char) ap->cdb_len;
1421 1295
1422 /* print device info to dmesg */ 1296 /* print device info to dmesg */
1423 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n", 1297 if (print_info)
1424 ap->id, device, 1298 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
1425 ata_mode_string(xfer_modes)); 1299 ap->id, dev->devno, ata_mode_string(xfer_mask));
1426 } 1300 }
1427 1301
1428 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap)); 1302 ap->host->max_cmd_len = 0;
1429 return; 1303 for (i = 0; i < ATA_MAX_DEVICES; i++)
1430 1304 ap->host->max_cmd_len = max_t(unsigned int,
1431err_out_nosup: 1305 ap->host->max_cmd_len,
1432 printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n", 1306 ap->device[i].cdb_len);
1433 ap->id, device);
1434err_out:
1435 dev->class++; /* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */
1436 DPRINTK("EXIT, err\n");
1437}
1438
1439
1440static inline u8 ata_dev_knobble(const struct ata_port *ap)
1441{
1442 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(ap->device->id)));
1443}
1444
1445/**
1446 * ata_dev_config - Run device specific handlers and check for
1447 * SATA->PATA bridges
1448 * @ap: Bus
1449 * @i: Device
1450 *
1451 * LOCKING:
1452 */
1453 1307
1454void ata_dev_config(struct ata_port *ap, unsigned int i)
1455{
1456 /* limit bridge transfers to udma5, 200 sectors */ 1308 /* limit bridge transfers to udma5, 200 sectors */
1457 if (ata_dev_knobble(ap)) { 1309 if (ata_dev_knobble(ap, dev)) {
1458 printk(KERN_INFO "ata%u(%u): applying bridge limits\n", 1310 if (print_info)
1459 ap->id, ap->device->devno); 1311 printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
1312 ap->id, dev->devno);
1460 ap->udma_mask &= ATA_UDMA5; 1313 ap->udma_mask &= ATA_UDMA5;
1461 ap->host->max_sectors = ATA_MAX_SECTORS; 1314 dev->max_sectors = ATA_MAX_SECTORS;
1462 ap->host->hostt->max_sectors = ATA_MAX_SECTORS;
1463 ap->device[i].flags |= ATA_DFLAG_LOCK_SECTORS;
1464 } 1315 }
1465 1316
1466 if (ap->ops->dev_config) 1317 if (ap->ops->dev_config)
1467 ap->ops->dev_config(ap, &ap->device[i]); 1318 ap->ops->dev_config(ap, dev);
1319
1320 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
1321 return 0;
1322
1323err_out_nosup:
1324 printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
1325 ap->id, dev->devno);
1326 DPRINTK("EXIT, err\n");
1327 return rc;
1468} 1328}
1469 1329
1470/** 1330/**
@@ -1484,21 +1344,59 @@ void ata_dev_config(struct ata_port *ap, unsigned int i)
1484 1344
1485static int ata_bus_probe(struct ata_port *ap) 1345static int ata_bus_probe(struct ata_port *ap)
1486{ 1346{
1487 unsigned int i, found = 0; 1347 unsigned int classes[ATA_MAX_DEVICES];
1348 unsigned int i, rc, found = 0;
1488 1349
1489 ap->ops->phy_reset(ap); 1350 ata_port_probe(ap);
1490 if (ap->flags & ATA_FLAG_PORT_DISABLED) 1351
1491 goto err_out; 1352 /* reset and determine device classes */
1353 for (i = 0; i < ATA_MAX_DEVICES; i++)
1354 classes[i] = ATA_DEV_UNKNOWN;
1355
1356 if (ap->ops->probe_reset) {
1357 rc = ap->ops->probe_reset(ap, classes);
1358 if (rc) {
 1359 printk(KERN_ERR "ata%u: reset failed (errno=%d)\n", ap->id, rc);
1360 return rc;
1361 }
1362 } else {
1363 ap->ops->phy_reset(ap);
1364
1365 if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
1366 for (i = 0; i < ATA_MAX_DEVICES; i++)
1367 classes[i] = ap->device[i].class;
1368
1369 ata_port_probe(ap);
1370 }
1492 1371
1372 for (i = 0; i < ATA_MAX_DEVICES; i++)
1373 if (classes[i] == ATA_DEV_UNKNOWN)
1374 classes[i] = ATA_DEV_NONE;
1375
1376 /* read IDENTIFY page and configure devices */
1493 for (i = 0; i < ATA_MAX_DEVICES; i++) { 1377 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1494 ata_dev_identify(ap, i); 1378 struct ata_device *dev = &ap->device[i];
1495 if (ata_dev_present(&ap->device[i])) { 1379
1496 found = 1; 1380 dev->class = classes[i];
1497 ata_dev_config(ap,i); 1381
1382 if (!ata_dev_present(dev))
1383 continue;
1384
1385 WARN_ON(dev->id != NULL);
1386 if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) {
1387 dev->class = ATA_DEV_NONE;
1388 continue;
1389 }
1390
1391 if (ata_dev_configure(ap, dev, 1)) {
1392 dev->class++; /* disable device */
1393 continue;
1498 } 1394 }
1395
1396 found = 1;
1499 } 1397 }
1500 1398
1501 if ((!found) || (ap->flags & ATA_FLAG_PORT_DISABLED)) 1399 if (!found)
1502 goto err_out_disable; 1400 goto err_out_disable;
1503 1401
1504 ata_set_mode(ap); 1402 ata_set_mode(ap);
@@ -1509,7 +1407,6 @@ static int ata_bus_probe(struct ata_port *ap)
1509 1407
1510err_out_disable: 1408err_out_disable:
1511 ap->ops->port_disable(ap); 1409 ap->ops->port_disable(ap);
1512err_out:
1513 return -1; 1410 return -1;
1514} 1411}
1515 1412
@@ -1530,6 +1427,41 @@ void ata_port_probe(struct ata_port *ap)
1530} 1427}
1531 1428
1532/** 1429/**
1430 * sata_print_link_status - Print SATA link status
1431 * @ap: SATA port to printk link status about
1432 *
1433 * This function prints link speed and status of a SATA link.
1434 *
1435 * LOCKING:
1436 * None.
1437 */
1438static void sata_print_link_status(struct ata_port *ap)
1439{
1440 u32 sstatus, tmp;
1441 const char *speed;
1442
1443 if (!ap->ops->scr_read)
1444 return;
1445
1446 sstatus = scr_read(ap, SCR_STATUS);
1447
1448 if (sata_dev_present(ap)) {
1449 tmp = (sstatus >> 4) & 0xf;
1450 if (tmp & (1 << 0))
1451 speed = "1.5";
1452 else if (tmp & (1 << 1))
1453 speed = "3.0";
1454 else
1455 speed = "<unknown>";
1456 printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
1457 ap->id, speed, sstatus);
1458 } else {
1459 printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
1460 ap->id, sstatus);
1461 }
1462}
1463
1464/**
1533 * __sata_phy_reset - Wake/reset a low-level SATA PHY 1465 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1534 * @ap: SATA port associated with target SATA PHY. 1466 * @ap: SATA port associated with target SATA PHY.
1535 * 1467 *
@@ -1563,27 +1495,14 @@ void __sata_phy_reset(struct ata_port *ap)
1563 break; 1495 break;
1564 } while (time_before(jiffies, timeout)); 1496 } while (time_before(jiffies, timeout));
1565 1497
1566 /* TODO: phy layer with polling, timeouts, etc. */ 1498 /* print link status */
1567 sstatus = scr_read(ap, SCR_STATUS); 1499 sata_print_link_status(ap);
1568 if (sata_dev_present(ap)) {
1569 const char *speed;
1570 u32 tmp;
1571 1500
1572 tmp = (sstatus >> 4) & 0xf; 1501 /* TODO: phy layer with polling, timeouts, etc. */
1573 if (tmp & (1 << 0)) 1502 if (sata_dev_present(ap))
1574 speed = "1.5";
1575 else if (tmp & (1 << 1))
1576 speed = "3.0";
1577 else
1578 speed = "<unknown>";
1579 printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
1580 ap->id, speed, sstatus);
1581 ata_port_probe(ap); 1503 ata_port_probe(ap);
1582 } else { 1504 else
1583 printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
1584 ap->id, sstatus);
1585 ata_port_disable(ap); 1505 ata_port_disable(ap);
1586 }
1587 1506
1588 if (ap->flags & ATA_FLAG_PORT_DISABLED) 1507 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1589 return; 1508 return;
@@ -1756,9 +1675,9 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1756 ata_timing_quantize(t, t, T, UT); 1675 ata_timing_quantize(t, t, T, UT);
1757 1676
1758 /* 1677 /*
1759 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, S.M.A.R.T 1678 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
 1760 * and some other commands. We have to ensure that the DMA cycle timing is 1679 * S.M.A.R.T and some other commands. We have to ensure that the
1761 * slower/equal than the fastest PIO timing. 1680 * DMA cycle timing is slower/equal than the fastest PIO timing.
1762 */ 1681 */
1763 1682
1764 if (speed > XFER_PIO_4) { 1683 if (speed > XFER_PIO_4) {
@@ -1767,7 +1686,7 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1767 } 1686 }
1768 1687
1769 /* 1688 /*
1770 * Lenghten active & recovery time so that cycle time is correct. 1689 * Lengthen active & recovery time so that cycle time is correct.
1771 */ 1690 */
1772 1691
1773 if (t->act8b + t->rec8b < t->cyc8b) { 1692 if (t->act8b + t->rec8b < t->cyc8b) {
@@ -1783,31 +1702,8 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1783 return 0; 1702 return 0;
1784} 1703}
1785 1704
1786static const struct {
1787 unsigned int shift;
1788 u8 base;
1789} xfer_mode_classes[] = {
1790 { ATA_SHIFT_UDMA, XFER_UDMA_0 },
1791 { ATA_SHIFT_MWDMA, XFER_MW_DMA_0 },
1792 { ATA_SHIFT_PIO, XFER_PIO_0 },
1793};
1794
1795static u8 base_from_shift(unsigned int shift)
1796{
1797 int i;
1798
1799 for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++)
1800 if (xfer_mode_classes[i].shift == shift)
1801 return xfer_mode_classes[i].base;
1802
1803 return 0xff;
1804}
1805
1806static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev) 1705static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
1807{ 1706{
1808 int ofs, idx;
1809 u8 base;
1810
1811 if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED)) 1707 if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
1812 return; 1708 return;
1813 1709
@@ -1816,65 +1712,58 @@ static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
1816 1712
1817 ata_dev_set_xfermode(ap, dev); 1713 ata_dev_set_xfermode(ap, dev);
1818 1714
1819 base = base_from_shift(dev->xfer_shift); 1715 if (ata_dev_revalidate(ap, dev, 0)) {
1820 ofs = dev->xfer_mode - base; 1716 printk(KERN_ERR "ata%u: failed to revalidate after set "
1821 idx = ofs + dev->xfer_shift; 1717 "xfermode, disabled\n", ap->id);
1822 WARN_ON(idx >= ARRAY_SIZE(xfer_mode_str)); 1718 ata_port_disable(ap);
1719 }
1823 1720
1824 DPRINTK("idx=%d xfer_shift=%u, xfer_mode=0x%x, base=0x%x, offset=%d\n", 1721 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
1825 idx, dev->xfer_shift, (int)dev->xfer_mode, (int)base, ofs); 1722 dev->xfer_shift, (int)dev->xfer_mode);
1826 1723
1827 printk(KERN_INFO "ata%u: dev %u configured for %s\n", 1724 printk(KERN_INFO "ata%u: dev %u configured for %s\n",
1828 ap->id, dev->devno, xfer_mode_str[idx]); 1725 ap->id, dev->devno,
1726 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
1829} 1727}
1830 1728
1831static int ata_host_set_pio(struct ata_port *ap) 1729static int ata_host_set_pio(struct ata_port *ap)
1832{ 1730{
1833 unsigned int mask; 1731 int i;
1834 int x, i;
1835 u8 base, xfer_mode;
1836
1837 mask = ata_get_mode_mask(ap, ATA_SHIFT_PIO);
1838 x = fgb(mask);
1839 if (x < 0) {
1840 printk(KERN_WARNING "ata%u: no PIO support\n", ap->id);
1841 return -1;
1842 }
1843
1844 base = base_from_shift(ATA_SHIFT_PIO);
1845 xfer_mode = base + x;
1846
1847 DPRINTK("base 0x%x xfer_mode 0x%x mask 0x%x x %d\n",
1848 (int)base, (int)xfer_mode, mask, x);
1849 1732
1850 for (i = 0; i < ATA_MAX_DEVICES; i++) { 1733 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1851 struct ata_device *dev = &ap->device[i]; 1734 struct ata_device *dev = &ap->device[i];
1852 if (ata_dev_present(dev)) { 1735
1853 dev->pio_mode = xfer_mode; 1736 if (!ata_dev_present(dev))
1854 dev->xfer_mode = xfer_mode; 1737 continue;
1855 dev->xfer_shift = ATA_SHIFT_PIO; 1738
1856 if (ap->ops->set_piomode) 1739 if (!dev->pio_mode) {
1857 ap->ops->set_piomode(ap, dev); 1740 printk(KERN_WARNING "ata%u: no PIO support\n", ap->id);
1741 return -1;
1858 } 1742 }
1743
1744 dev->xfer_mode = dev->pio_mode;
1745 dev->xfer_shift = ATA_SHIFT_PIO;
1746 if (ap->ops->set_piomode)
1747 ap->ops->set_piomode(ap, dev);
1859 } 1748 }
1860 1749
1861 return 0; 1750 return 0;
1862} 1751}
1863 1752
1864static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode, 1753static void ata_host_set_dma(struct ata_port *ap)
1865 unsigned int xfer_shift)
1866{ 1754{
1867 int i; 1755 int i;
1868 1756
1869 for (i = 0; i < ATA_MAX_DEVICES; i++) { 1757 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1870 struct ata_device *dev = &ap->device[i]; 1758 struct ata_device *dev = &ap->device[i];
1871 if (ata_dev_present(dev)) { 1759
1872 dev->dma_mode = xfer_mode; 1760 if (!ata_dev_present(dev) || !dev->dma_mode)
1873 dev->xfer_mode = xfer_mode; 1761 continue;
1874 dev->xfer_shift = xfer_shift; 1762
1875 if (ap->ops->set_dmamode) 1763 dev->xfer_mode = dev->dma_mode;
1876 ap->ops->set_dmamode(ap, dev); 1764 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
1877 } 1765 if (ap->ops->set_dmamode)
1766 ap->ops->set_dmamode(ap, dev);
1878 } 1767 }
1879} 1768}
1880 1769
@@ -1886,32 +1775,37 @@ static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode,
1886 * 1775 *
1887 * LOCKING: 1776 * LOCKING:
1888 * PCI/etc. bus probe sem. 1777 * PCI/etc. bus probe sem.
1889 *
1890 */ 1778 */
1891static void ata_set_mode(struct ata_port *ap) 1779static void ata_set_mode(struct ata_port *ap)
1892{ 1780{
1893 unsigned int xfer_shift; 1781 int i, rc;
1894 u8 xfer_mode;
1895 int rc;
1896 1782
1897 /* step 1: always set host PIO timings */ 1783 /* step 1: calculate xfer_mask */
1898 rc = ata_host_set_pio(ap); 1784 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1899 if (rc) 1785 struct ata_device *dev = &ap->device[i];
1900 goto err_out; 1786 unsigned int xfer_mask;
1901 1787
1902 /* step 2: choose the best data xfer mode */ 1788 if (!ata_dev_present(dev))
1903 xfer_mode = xfer_shift = 0; 1789 continue;
1904 rc = ata_choose_xfer_mode(ap, &xfer_mode, &xfer_shift); 1790
1791 xfer_mask = ata_dev_xfermask(ap, dev);
1792
1793 dev->pio_mode = ata_xfer_mask2mode(xfer_mask & ATA_MASK_PIO);
1794 dev->dma_mode = ata_xfer_mask2mode(xfer_mask & (ATA_MASK_MWDMA |
1795 ATA_MASK_UDMA));
1796 }
1797
1798 /* step 2: always set host PIO timings */
1799 rc = ata_host_set_pio(ap);
1905 if (rc) 1800 if (rc)
1906 goto err_out; 1801 goto err_out;
1907 1802
1908 /* step 3: if that xfer mode isn't PIO, set host DMA timings */ 1803 /* step 3: set host DMA timings */
1909 if (xfer_shift != ATA_SHIFT_PIO) 1804 ata_host_set_dma(ap);
1910 ata_host_set_dma(ap, xfer_mode, xfer_shift);
1911 1805
1912 /* step 4: update devices' xfer mode */ 1806 /* step 4: update devices' xfer mode */
1913 ata_dev_set_mode(ap, &ap->device[0]); 1807 for (i = 0; i < ATA_MAX_DEVICES; i++)
1914 ata_dev_set_mode(ap, &ap->device[1]); 1808 ata_dev_set_mode(ap, &ap->device[i]);
1915 1809
1916 if (ap->flags & ATA_FLAG_PORT_DISABLED) 1810 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1917 return; 1811 return;
@@ -1926,6 +1820,26 @@ err_out:
1926} 1820}
1927 1821
1928/** 1822/**
1823 * ata_tf_to_host - issue ATA taskfile to host controller
1824 * @ap: port to which command is being issued
1825 * @tf: ATA taskfile register set
1826 *
1827 * Issues ATA taskfile register set to ATA host controller,
1828 * with proper synchronization with interrupt handler and
1829 * other threads.
1830 *
1831 * LOCKING:
1832 * spin_lock_irqsave(host_set lock)
1833 */
1834
1835static inline void ata_tf_to_host(struct ata_port *ap,
1836 const struct ata_taskfile *tf)
1837{
1838 ap->ops->tf_load(ap, tf);
1839 ap->ops->exec_command(ap, tf);
1840}
1841
1842/**
1929 * ata_busy_sleep - sleep until BSY clears, or timeout 1843 * ata_busy_sleep - sleep until BSY clears, or timeout
1930 * @ap: port containing status register to be polled 1844 * @ap: port containing status register to be polled
1931 * @tmout_pat: impatience timeout 1845 * @tmout_pat: impatience timeout
@@ -1935,12 +1849,10 @@ err_out:
1935 * or a timeout occurs. 1849 * or a timeout occurs.
1936 * 1850 *
1937 * LOCKING: None. 1851 * LOCKING: None.
1938 *
1939 */ 1852 */
1940 1853
1941static unsigned int ata_busy_sleep (struct ata_port *ap, 1854unsigned int ata_busy_sleep (struct ata_port *ap,
1942 unsigned long tmout_pat, 1855 unsigned long tmout_pat, unsigned long tmout)
1943 unsigned long tmout)
1944{ 1856{
1945 unsigned long timer_start, timeout; 1857 unsigned long timer_start, timeout;
1946 u8 status; 1858 u8 status;
@@ -2159,9 +2071,9 @@ void ata_bus_reset(struct ata_port *ap)
2159 /* 2071 /*
2160 * determine by signature whether we have ATA or ATAPI devices 2072 * determine by signature whether we have ATA or ATAPI devices
2161 */ 2073 */
2162 err = ata_dev_try_classify(ap, 0); 2074 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2163 if ((slave_possible) && (err != 0x81)) 2075 if ((slave_possible) && (err != 0x81))
2164 ata_dev_try_classify(ap, 1); 2076 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2165 2077
2166 /* re-enable interrupts */ 2078 /* re-enable interrupts */
2167 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */ 2079 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
@@ -2196,11 +2108,446 @@ err_out:
2196 DPRINTK("EXIT\n"); 2108 DPRINTK("EXIT\n");
2197} 2109}
2198 2110
2199static void ata_pr_blacklisted(const struct ata_port *ap, 2111static int sata_phy_resume(struct ata_port *ap)
2200 const struct ata_device *dev)
2201{ 2112{
2202 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, disabling DMA\n", 2113 unsigned long timeout = jiffies + (HZ * 5);
2203 ap->id, dev->devno); 2114 u32 sstatus;
2115
2116 scr_write_flush(ap, SCR_CONTROL, 0x300);
2117
2118 /* Wait for phy to become ready, if necessary. */
2119 do {
2120 msleep(200);
2121 sstatus = scr_read(ap, SCR_STATUS);
2122 if ((sstatus & 0xf) != 1)
2123 return 0;
2124 } while (time_before(jiffies, timeout));
2125
2126 return -1;
2127}
2128
2129/**
2130 * ata_std_probeinit - initialize probing
2131 * @ap: port to be probed
2132 *
2133 * @ap is about to be probed. Initialize it. This function is
2134 * to be used as standard callback for ata_drive_probe_reset().
2135 *
2136 * NOTE!!! Do not use this function as probeinit if a low level
2137 * driver implements only hardreset. Just pass NULL as probeinit
2138 * in that case. Using this function is probably okay but doing
2139 * so makes reset sequence different from the original
2140 * ->phy_reset implementation and Jeff nervous. :-P
2141 */
2142void ata_std_probeinit(struct ata_port *ap)
2143{
2144 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read) {
2145 sata_phy_resume(ap);
2146 if (sata_dev_present(ap))
2147 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2148 }
2149}
2150
2151/**
2152 * ata_std_softreset - reset host port via ATA SRST
2153 * @ap: port to reset
2154 * @verbose: fail verbosely
2155 * @classes: resulting classes of attached devices
2156 *
2157 * Reset host port using ATA SRST. This function is to be used
2158 * as standard callback for ata_drive_*_reset() functions.
2159 *
2160 * LOCKING:
2161 * Kernel thread context (may sleep)
2162 *
2163 * RETURNS:
2164 * 0 on success, -errno otherwise.
2165 */
2166int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2167{
2168 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2169 unsigned int devmask = 0, err_mask;
2170 u8 err;
2171
2172 DPRINTK("ENTER\n");
2173
2174 if (ap->ops->scr_read && !sata_dev_present(ap)) {
2175 classes[0] = ATA_DEV_NONE;
2176 goto out;
2177 }
2178
2179 /* determine if device 0/1 are present */
2180 if (ata_devchk(ap, 0))
2181 devmask |= (1 << 0);
2182 if (slave_possible && ata_devchk(ap, 1))
2183 devmask |= (1 << 1);
2184
2185 /* select device 0 again */
2186 ap->ops->dev_select(ap, 0);
2187
2188 /* issue bus reset */
2189 DPRINTK("about to softreset, devmask=%x\n", devmask);
2190 err_mask = ata_bus_softreset(ap, devmask);
2191 if (err_mask) {
2192 if (verbose)
2193 printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
2194 ap->id, err_mask);
2195 else
2196 DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
2197 err_mask);
2198 return -EIO;
2199 }
2200
2201 /* determine by signature whether we have ATA or ATAPI devices */
2202 classes[0] = ata_dev_try_classify(ap, 0, &err);
2203 if (slave_possible && err != 0x81)
2204 classes[1] = ata_dev_try_classify(ap, 1, &err);
2205
2206 out:
2207 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2208 return 0;
2209}
2210
2211/**
2212 * sata_std_hardreset - reset host port via SATA phy reset
2213 * @ap: port to reset
2214 * @verbose: fail verbosely
2215 * @class: resulting class of attached device
2216 *
2217 * SATA phy-reset host port using DET bits of SControl register.
2218 * This function is to be used as standard callback for
2219 * ata_drive_*_reset().
2220 *
2221 * LOCKING:
2222 * Kernel thread context (may sleep)
2223 *
2224 * RETURNS:
2225 * 0 on success, -errno otherwise.
2226 */
2227int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
2228{
2229 DPRINTK("ENTER\n");
2230
2231 /* Issue phy wake/reset */
2232 scr_write_flush(ap, SCR_CONTROL, 0x301);
2233
2234 /*
2235 * Couldn't find anything in SATA I/II specs, but AHCI-1.1
2236 * 10.4.2 says at least 1 ms.
2237 */
2238 msleep(1);
2239
2240 /* Bring phy back */
2241 sata_phy_resume(ap);
2242
2243 /* TODO: phy layer with polling, timeouts, etc. */
2244 if (!sata_dev_present(ap)) {
2245 *class = ATA_DEV_NONE;
2246 DPRINTK("EXIT, link offline\n");
2247 return 0;
2248 }
2249
2250 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2251 if (verbose)
2252 printk(KERN_ERR "ata%u: COMRESET failed "
2253 "(device not ready)\n", ap->id);
2254 else
2255 DPRINTK("EXIT, device not ready\n");
2256 return -EIO;
2257 }
2258
2259 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2260
2261 *class = ata_dev_try_classify(ap, 0, NULL);
2262
2263 DPRINTK("EXIT, class=%u\n", *class);
2264 return 0;
2265}
2266
2267/**
2268 * ata_std_postreset - standard postreset callback
2269 * @ap: the target ata_port
2270 * @classes: classes of attached devices
2271 *
2272 * This function is invoked after a successful reset. Note that
2273 * the device might have been reset more than once using
2274 * different reset methods before postreset is invoked.
2275 *
2276 * This function is to be used as standard callback for
2277 * ata_drive_*_reset().
2278 *
2279 * LOCKING:
2280 * Kernel thread context (may sleep)
2281 */
2282void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2283{
2284 DPRINTK("ENTER\n");
2285
2286 /* set cable type if it isn't already set */
2287 if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
2288 ap->cbl = ATA_CBL_SATA;
2289
2290 /* print link status */
2291 if (ap->cbl == ATA_CBL_SATA)
2292 sata_print_link_status(ap);
2293
2294 /* re-enable interrupts */
2295 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2296 ata_irq_on(ap);
2297
2298 /* is double-select really necessary? */
2299 if (classes[0] != ATA_DEV_NONE)
2300 ap->ops->dev_select(ap, 1);
2301 if (classes[1] != ATA_DEV_NONE)
2302 ap->ops->dev_select(ap, 0);
2303
2304 /* bail out if no device is present */
2305 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2306 DPRINTK("EXIT, no device\n");
2307 return;
2308 }
2309
2310 /* set up device control */
2311 if (ap->ioaddr.ctl_addr) {
2312 if (ap->flags & ATA_FLAG_MMIO)
2313 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2314 else
2315 outb(ap->ctl, ap->ioaddr.ctl_addr);
2316 }
2317
2318 DPRINTK("EXIT\n");
2319}
2320
2321/**
2322 * ata_std_probe_reset - standard probe reset method
 2323 * @ap: port to perform probe-reset
2324 * @classes: resulting classes of attached devices
2325 *
2326 * The stock off-the-shelf ->probe_reset method.
2327 *
2328 * LOCKING:
2329 * Kernel thread context (may sleep)
2330 *
2331 * RETURNS:
2332 * 0 on success, -errno otherwise.
2333 */
2334int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2335{
2336 ata_reset_fn_t hardreset;
2337
2338 hardreset = NULL;
2339 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
2340 hardreset = sata_std_hardreset;
2341
2342 return ata_drive_probe_reset(ap, ata_std_probeinit,
2343 ata_std_softreset, hardreset,
2344 ata_std_postreset, classes);
2345}
2346
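As a usage illustration, a low-level driver that is happy with the stock behaviour simply points ->probe_reset at ata_std_probe_reset(); one that has its own hardreset builds the chain itself. The sketch below assumes a hypothetical foo driver; foo_hardreset is a placeholder, not a function from this patch:

/* sketch: a hypothetical LLD wiring its own hardreset into the chain */
static int foo_hardreset(struct ata_port *ap, int verbose, unsigned int *class);

static int foo_probe_reset(struct ata_port *ap, unsigned int *classes)
{
	/* prefer SRST, fall back to the controller-specific hardreset */
	return ata_drive_probe_reset(ap, ata_std_probeinit,
				     ata_std_softreset, foo_hardreset,
				     ata_std_postreset, classes);
}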
2347static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset,
2348 ata_postreset_fn_t postreset,
2349 unsigned int *classes)
2350{
2351 int i, rc;
2352
2353 for (i = 0; i < ATA_MAX_DEVICES; i++)
2354 classes[i] = ATA_DEV_UNKNOWN;
2355
2356 rc = reset(ap, 0, classes);
2357 if (rc)
2358 return rc;
2359
2360 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
 2361 * complete and convert all ATA_DEV_UNKNOWN to
2362 * ATA_DEV_NONE.
2363 */
2364 for (i = 0; i < ATA_MAX_DEVICES; i++)
2365 if (classes[i] != ATA_DEV_UNKNOWN)
2366 break;
2367
2368 if (i < ATA_MAX_DEVICES)
2369 for (i = 0; i < ATA_MAX_DEVICES; i++)
2370 if (classes[i] == ATA_DEV_UNKNOWN)
2371 classes[i] = ATA_DEV_NONE;
2372
2373 if (postreset)
2374 postreset(ap, classes);
2375
2376 return classes[0] != ATA_DEV_UNKNOWN ? 0 : -ENODEV;
2377}
2378
2379/**
2380 * ata_drive_probe_reset - Perform probe reset with given methods
2381 * @ap: port to reset
2382 * @probeinit: probeinit method (can be NULL)
2383 * @softreset: softreset method (can be NULL)
2384 * @hardreset: hardreset method (can be NULL)
2385 * @postreset: postreset method (can be NULL)
2386 * @classes: resulting classes of attached devices
2387 *
2388 * Reset the specified port and classify attached devices using
2389 * given methods. This function prefers softreset but tries all
2390 * possible reset sequences to reset and classify devices. This
2391 * function is intended to be used for constructing ->probe_reset
2392 * callback by low level drivers.
2393 *
2394 * Reset methods should follow the following rules.
2395 *
 2396 * - Return 0 on success, -errno on failure.
2397 * - If classification is supported, fill classes[] with
2398 * recognized class codes.
2399 * - If classification is not supported, leave classes[] alone.
2400 * - If verbose is non-zero, print error message on failure;
2401 * otherwise, shut up.
2402 *
2403 * LOCKING:
2404 * Kernel thread context (may sleep)
2405 *
2406 * RETURNS:
 2407 * 0 on success, -EINVAL if no reset method is available, -ENODEV
2408 * if classification fails, and any error code from reset
2409 * methods.
2410 */
2411int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2412 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2413 ata_postreset_fn_t postreset, unsigned int *classes)
2414{
2415 int rc = -EINVAL;
2416
2417 if (probeinit)
2418 probeinit(ap);
2419
2420 if (softreset) {
2421 rc = do_probe_reset(ap, softreset, postreset, classes);
2422 if (rc == 0)
2423 return 0;
2424 }
2425
2426 if (!hardreset)
2427 return rc;
2428
2429 rc = do_probe_reset(ap, hardreset, postreset, classes);
2430 if (rc == 0 || rc != -ENODEV)
2431 return rc;
2432
2433 if (softreset)
2434 rc = do_probe_reset(ap, softreset, postreset, classes);
2435
2436 return rc;
2437}
2438
2439/**
2440 * ata_dev_same_device - Determine whether new ID matches configured device
2441 * @ap: port on which the device to compare against resides
2442 * @dev: device to compare against
2443 * @new_class: class of the new device
2444 * @new_id: IDENTIFY page of the new device
2445 *
2446 * Compare @new_class and @new_id against @dev and determine
2447 * whether @dev is the device indicated by @new_class and
2448 * @new_id.
2449 *
2450 * LOCKING:
2451 * None.
2452 *
2453 * RETURNS:
2454 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2455 */
2456static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2457 unsigned int new_class, const u16 *new_id)
2458{
2459 const u16 *old_id = dev->id;
2460 unsigned char model[2][41], serial[2][21];
2461 u64 new_n_sectors;
2462
2463 if (dev->class != new_class) {
2464 printk(KERN_INFO
2465 "ata%u: dev %u class mismatch %d != %d\n",
2466 ap->id, dev->devno, dev->class, new_class);
2467 return 0;
2468 }
2469
2470 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2471 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2472 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2473 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2474 new_n_sectors = ata_id_n_sectors(new_id);
2475
2476 if (strcmp(model[0], model[1])) {
2477 printk(KERN_INFO
2478 "ata%u: dev %u model number mismatch '%s' != '%s'\n",
2479 ap->id, dev->devno, model[0], model[1]);
2480 return 0;
2481 }
2482
2483 if (strcmp(serial[0], serial[1])) {
2484 printk(KERN_INFO
2485 "ata%u: dev %u serial number mismatch '%s' != '%s'\n",
2486 ap->id, dev->devno, serial[0], serial[1]);
2487 return 0;
2488 }
2489
2490 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2491 printk(KERN_INFO
2492 "ata%u: dev %u n_sectors mismatch %llu != %llu\n",
2493 ap->id, dev->devno, (unsigned long long)dev->n_sectors,
2494 (unsigned long long)new_n_sectors);
2495 return 0;
2496 }
2497
2498 return 1;
2499}
2500
2501/**
2502 * ata_dev_revalidate - Revalidate ATA device
2503 * @ap: port on which the device to revalidate resides
2504 * @dev: device to revalidate
2505 * @post_reset: is this revalidation after reset?
2506 *
2507 * Re-read IDENTIFY page and make sure @dev is still attached to
2508 * the port.
2509 *
2510 * LOCKING:
2511 * Kernel thread context (may sleep)
2512 *
2513 * RETURNS:
2514 * 0 on success, negative errno otherwise
2515 */
2516int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
2517 int post_reset)
2518{
2519 unsigned int class;
2520 u16 *id;
2521 int rc;
2522
2523 if (!ata_dev_present(dev))
2524 return -ENODEV;
2525
2526 class = dev->class;
2527 id = NULL;
2528
2529 /* allocate & read ID data */
2530 rc = ata_dev_read_id(ap, dev, &class, post_reset, &id);
2531 if (rc)
2532 goto fail;
2533
2534 /* is the device still there? */
2535 if (!ata_dev_same_device(ap, dev, class, id)) {
2536 rc = -ENODEV;
2537 goto fail;
2538 }
2539
2540 kfree(dev->id);
2541 dev->id = id;
2542
2543 /* configure device according to the new ID */
2544 return ata_dev_configure(ap, dev, 0);
2545
2546 fail:
2547 printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n",
2548 ap->id, dev->devno, rc);
2549 kfree(id);
2550 return rc;
2204} 2551}
2205 2552
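As a usage illustration, a caller that has just reset or resumed a port might revalidate an attached device along these lines (a sketch; the drop-on-failure policy is an assumption, not something this helper mandates):

/* sketch: revalidating device 0 after a reset-like event */
static void foo_revalidate(struct ata_port *ap)
{
	struct ata_device *dev = &ap->device[0];

	if (ata_dev_present(dev) && ata_dev_revalidate(ap, dev, 1))
		dev->class = ATA_DEV_NONE;	/* assumed policy: drop the device */
}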
2206static const char * const ata_dma_blacklist [] = { 2553static const char * const ata_dma_blacklist [] = {
@@ -2237,151 +2584,57 @@ static const char * const ata_dma_blacklist [] = {
2237 2584
2238static int ata_dma_blacklisted(const struct ata_device *dev) 2585static int ata_dma_blacklisted(const struct ata_device *dev)
2239{ 2586{
2240 unsigned char model_num[40]; 2587 unsigned char model_num[41];
2241 char *s;
2242 unsigned int len;
2243 int i; 2588 int i;
2244 2589
2245 ata_dev_id_string(dev->id, model_num, ATA_ID_PROD_OFS, 2590 ata_id_c_string(dev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num));
2246 sizeof(model_num));
2247 s = &model_num[0];
2248 len = strnlen(s, sizeof(model_num));
2249
2250 /* ATAPI specifies that empty space is blank-filled; remove blanks */
2251 while ((len > 0) && (s[len - 1] == ' ')) {
2252 len--;
2253 s[len] = 0;
2254 }
2255 2591
2256 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++) 2592 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++)
2257 if (!strncmp(ata_dma_blacklist[i], s, len)) 2593 if (!strcmp(ata_dma_blacklist[i], model_num))
2258 return 1; 2594 return 1;
2259 2595
2260 return 0; 2596 return 0;
2261} 2597}
2262 2598
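The simplification above relies on ata_id_c_string() handing back a trimmed, NUL-terminated model string, which is why a plain strcmp() can replace the old manual blank-stripping. The helper's assumed behaviour is roughly the following (sketch only; the real definition lives alongside ata_id_string()):

/* sketch: assumed behaviour of ata_id_c_string() */
static void id_c_string_sketch(const u16 *id, unsigned char *s,
			       unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	/* byte-swapped copy of the IDENTIFY string field */
	ata_id_string(id, s, ofs, len - 1);
	s[len - 1] = '\0';

	/* strip the trailing space padding mandated by ATA/ATAPI */
	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}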
2263static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift)
2264{
2265 const struct ata_device *master, *slave;
2266 unsigned int mask;
2267
2268 master = &ap->device[0];
2269 slave = &ap->device[1];
2270
2271 assert (ata_dev_present(master) || ata_dev_present(slave));
2272
2273 if (shift == ATA_SHIFT_UDMA) {
2274 mask = ap->udma_mask;
2275 if (ata_dev_present(master)) {
2276 mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff);
2277 if (ata_dma_blacklisted(master)) {
2278 mask = 0;
2279 ata_pr_blacklisted(ap, master);
2280 }
2281 }
2282 if (ata_dev_present(slave)) {
2283 mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff);
2284 if (ata_dma_blacklisted(slave)) {
2285 mask = 0;
2286 ata_pr_blacklisted(ap, slave);
2287 }
2288 }
2289 }
2290 else if (shift == ATA_SHIFT_MWDMA) {
2291 mask = ap->mwdma_mask;
2292 if (ata_dev_present(master)) {
2293 mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07);
2294 if (ata_dma_blacklisted(master)) {
2295 mask = 0;
2296 ata_pr_blacklisted(ap, master);
2297 }
2298 }
2299 if (ata_dev_present(slave)) {
2300 mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07);
2301 if (ata_dma_blacklisted(slave)) {
2302 mask = 0;
2303 ata_pr_blacklisted(ap, slave);
2304 }
2305 }
2306 }
2307 else if (shift == ATA_SHIFT_PIO) {
2308 mask = ap->pio_mask;
2309 if (ata_dev_present(master)) {
2310 /* spec doesn't return explicit support for
2311 * PIO0-2, so we fake it
2312 */
2313 u16 tmp_mode = master->id[ATA_ID_PIO_MODES] & 0x03;
2314 tmp_mode <<= 3;
2315 tmp_mode |= 0x7;
2316 mask &= tmp_mode;
2317 }
2318 if (ata_dev_present(slave)) {
2319 /* spec doesn't return explicit support for
2320 * PIO0-2, so we fake it
2321 */
2322 u16 tmp_mode = slave->id[ATA_ID_PIO_MODES] & 0x03;
2323 tmp_mode <<= 3;
2324 tmp_mode |= 0x7;
2325 mask &= tmp_mode;
2326 }
2327 }
2328 else {
2329 mask = 0xffffffff; /* shut up compiler warning */
2330 BUG();
2331 }
2332
2333 return mask;
2334}
2335
2336/* find greatest bit */
2337static int fgb(u32 bitmap)
2338{
2339 unsigned int i;
2340 int x = -1;
2341
2342 for (i = 0; i < 32; i++)
2343 if (bitmap & (1 << i))
2344 x = i;
2345
2346 return x;
2347}
2348
2349/** 2599/**
2350 * ata_choose_xfer_mode - attempt to find best transfer mode 2600 * ata_dev_xfermask - Compute supported xfermask of the given device
2351 * @ap: Port for which an xfer mode will be selected 2601 * @ap: Port on which the device to compute xfermask for resides
2352 * @xfer_mode_out: (output) SET FEATURES - XFER MODE code 2602 * @dev: Device to compute xfermask for
2353 * @xfer_shift_out: (output) bit shift that selects this mode
2354 * 2603 *
2355 * Based on host and device capabilities, determine the 2604 * Compute supported xfermask of @dev. This function is
2356 * maximum transfer mode that is amenable to all. 2605 * responsible for applying all known limits including host
2606 * controller limits, device blacklist, etc...
2357 * 2607 *
2358 * LOCKING: 2608 * LOCKING:
2359 * PCI/etc. bus probe sem. 2609 * None.
2360 * 2610 *
2361 * RETURNS: 2611 * RETURNS:
2362 * Zero on success, negative on error. 2612 * Computed xfermask.
2363 */ 2613 */
2364 2614static unsigned int ata_dev_xfermask(struct ata_port *ap,
2365static int ata_choose_xfer_mode(const struct ata_port *ap, 2615 struct ata_device *dev)
2366 u8 *xfer_mode_out,
2367 unsigned int *xfer_shift_out)
2368{ 2616{
2369 unsigned int mask, shift; 2617 unsigned long xfer_mask;
2370 int x, i; 2618 int i;
2371 2619
2372 for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++) { 2620 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
2373 shift = xfer_mode_classes[i].shift; 2621 ap->udma_mask);
2374 mask = ata_get_mode_mask(ap, shift);
2375 2622
2376 x = fgb(mask); 2623 /* use port-wide xfermask for now */
2377 if (x >= 0) { 2624 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2378 *xfer_mode_out = xfer_mode_classes[i].base + x; 2625 struct ata_device *d = &ap->device[i];
2379 *xfer_shift_out = shift; 2626 if (!ata_dev_present(d))
2380 return 0; 2627 continue;
2381 } 2628 xfer_mask &= ata_id_xfermask(d->id);
2629 if (ata_dma_blacklisted(d))
2630 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2382 } 2631 }
2383 2632
2384 return -1; 2633 if (ata_dma_blacklisted(dev))
2634 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, "
2635 "disabling DMA\n", ap->id, dev->devno);
2636
2637 return xfer_mask;
2385} 2638}
2386 2639
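ata_pack_xfermask() folds the per-class PIO/MWDMA/UDMA masks into a single value so host limits, IDENTIFY capabilities and the blacklist can all be applied with plain mask operations. The packing is assumed to follow the ATA_SHIFT_*/ATA_MASK_* layout used elsewhere in libata (sketch under that assumption):

/* sketch: the assumed pack convention behind ata_dev_xfermask() */
static unsigned int pack_xfermask_sketch(unsigned int pio_mask,
					 unsigned int mwdma_mask,
					 unsigned int udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
	       ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
	       ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

With that layout, knocking DMA out for a blacklisted device reduces to the single xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA) seen above.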
2387/** 2640/**
@@ -2420,63 +2673,28 @@ static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
2420} 2673}
2421 2674
2422/** 2675/**
2423 * ata_dev_reread_id - Reread the device identify device info
2424 * @ap: port where the device is
2425 * @dev: device to reread the identify device info
2426 *
2427 * LOCKING:
2428 */
2429
2430static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev)
2431{
2432 struct ata_taskfile tf;
2433
2434 ata_tf_init(ap, &tf, dev->devno);
2435
2436 if (dev->class == ATA_DEV_ATA) {
2437 tf.command = ATA_CMD_ID_ATA;
2438 DPRINTK("do ATA identify\n");
2439 } else {
2440 tf.command = ATA_CMD_ID_ATAPI;
2441 DPRINTK("do ATAPI identify\n");
2442 }
2443
2444 tf.flags |= ATA_TFLAG_DEVICE;
2445 tf.protocol = ATA_PROT_PIO;
2446
2447 if (ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
2448 dev->id, sizeof(dev->id)))
2449 goto err_out;
2450
2451 swap_buf_le16(dev->id, ATA_ID_WORDS);
2452
2453 ata_dump_id(dev);
2454
2455 DPRINTK("EXIT\n");
2456
2457 return;
2458err_out:
2459 printk(KERN_ERR "ata%u: failed to reread ID, disabled\n", ap->id);
2460 ata_port_disable(ap);
2461}
2462
2463/**
2464 * ata_dev_init_params - Issue INIT DEV PARAMS command 2676 * ata_dev_init_params - Issue INIT DEV PARAMS command
2465 * @ap: Port associated with device @dev 2677 * @ap: Port associated with device @dev
2466 * @dev: Device to which command will be sent 2678 * @dev: Device to which command will be sent
2467 * 2679 *
2468 * LOCKING: 2680 * LOCKING:
2681 * Kernel thread context (may sleep)
2682 *
2683 * RETURNS:
2684 * 0 on success, AC_ERR_* mask otherwise.
2469 */ 2685 */
2470 2686
2471static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev) 2687static unsigned int ata_dev_init_params(struct ata_port *ap,
2688 struct ata_device *dev)
2472{ 2689{
2473 struct ata_taskfile tf; 2690 struct ata_taskfile tf;
2691 unsigned int err_mask;
2474 u16 sectors = dev->id[6]; 2692 u16 sectors = dev->id[6];
2475 u16 heads = dev->id[3]; 2693 u16 heads = dev->id[3];
2476 2694
2477 /* Number of sectors per track 1-255. Number of heads 1-16 */ 2695 /* Number of sectors per track 1-255. Number of heads 1-16 */
2478 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16) 2696 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2479 return; 2697 return 0;
2480 2698
2481 /* set up init dev params taskfile */ 2699 /* set up init dev params taskfile */
2482 DPRINTK("init dev params \n"); 2700 DPRINTK("init dev params \n");
@@ -2488,13 +2706,10 @@ static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev)
2488 tf.nsect = sectors; 2706 tf.nsect = sectors;
2489 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */ 2707 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2490 2708
2491 if (ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0)) { 2709 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2492 printk(KERN_ERR "ata%u: failed to init parameters, disabled\n",
2493 ap->id);
2494 ata_port_disable(ap);
2495 }
2496 2710
2497 DPRINTK("EXIT\n"); 2711 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2712 return err_mask;
2498} 2713}
2499 2714
2500/** 2715/**
@@ -2514,11 +2729,11 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
2514 int dir = qc->dma_dir; 2729 int dir = qc->dma_dir;
2515 void *pad_buf = NULL; 2730 void *pad_buf = NULL;
2516 2731
2517 assert(qc->flags & ATA_QCFLAG_DMAMAP); 2732 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
2518 assert(sg != NULL); 2733 WARN_ON(sg == NULL);
2519 2734
2520 if (qc->flags & ATA_QCFLAG_SINGLE) 2735 if (qc->flags & ATA_QCFLAG_SINGLE)
2521 assert(qc->n_elem <= 1); 2736 WARN_ON(qc->n_elem > 1);
2522 2737
2523 VPRINTK("unmapping %u sg elements\n", qc->n_elem); 2738 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
2524 2739
@@ -2573,8 +2788,8 @@ static void ata_fill_sg(struct ata_queued_cmd *qc)
2573 struct scatterlist *sg; 2788 struct scatterlist *sg;
2574 unsigned int idx; 2789 unsigned int idx;
2575 2790
2576 assert(qc->__sg != NULL); 2791 WARN_ON(qc->__sg == NULL);
2577 assert(qc->n_elem > 0 || qc->pad_len > 0); 2792 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
2578 2793
2579 idx = 0; 2794 idx = 0;
2580 ata_for_each_sg(sg, qc) { 2795 ata_for_each_sg(sg, qc) {
@@ -2727,7 +2942,7 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2727 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ); 2942 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2728 struct scatterlist *psg = &qc->pad_sgent; 2943 struct scatterlist *psg = &qc->pad_sgent;
2729 2944
2730 assert(qc->dev->class == ATA_DEV_ATAPI); 2945 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
2731 2946
2732 memset(pad_buf, 0, ATA_DMA_PAD_SZ); 2947 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
2733 2948
@@ -2791,7 +3006,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
2791 int n_elem, pre_n_elem, dir, trim_sg = 0; 3006 int n_elem, pre_n_elem, dir, trim_sg = 0;
2792 3007
2793 VPRINTK("ENTER, ata%u\n", ap->id); 3008 VPRINTK("ENTER, ata%u\n", ap->id);
2794 assert(qc->flags & ATA_QCFLAG_SG); 3009 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
2795 3010
2796 /* we must lengthen transfers to end on a 32-bit boundary */ 3011 /* we must lengthen transfers to end on a 32-bit boundary */
2797 qc->pad_len = lsg->length & 3; 3012 qc->pad_len = lsg->length & 3;
@@ -2800,7 +3015,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
2800 struct scatterlist *psg = &qc->pad_sgent; 3015 struct scatterlist *psg = &qc->pad_sgent;
2801 unsigned int offset; 3016 unsigned int offset;
2802 3017
2803 assert(qc->dev->class == ATA_DEV_ATAPI); 3018 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
2804 3019
2805 memset(pad_buf, 0, ATA_DMA_PAD_SZ); 3020 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
2806 3021
@@ -2876,7 +3091,7 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc)
2876} 3091}
2877 3092
2878/** 3093/**
2879 * ata_pio_poll - 3094 * ata_pio_poll - poll using PIO, depending on current state
2880 * @ap: the target ata_port 3095 * @ap: the target ata_port
2881 * 3096 *
2882 * LOCKING: 3097 * LOCKING:
@@ -2894,7 +3109,7 @@ static unsigned long ata_pio_poll(struct ata_port *ap)
2894 unsigned int reg_state = HSM_ST_UNKNOWN; 3109 unsigned int reg_state = HSM_ST_UNKNOWN;
2895 3110
2896 qc = ata_qc_from_tag(ap, ap->active_tag); 3111 qc = ata_qc_from_tag(ap, ap->active_tag);
2897 assert(qc != NULL); 3112 WARN_ON(qc == NULL);
2898 3113
2899 switch (ap->hsm_task_state) { 3114 switch (ap->hsm_task_state) {
2900 case HSM_ST: 3115 case HSM_ST:
@@ -2915,7 +3130,7 @@ static unsigned long ata_pio_poll(struct ata_port *ap)
2915 status = ata_chk_status(ap); 3130 status = ata_chk_status(ap);
2916 if (status & ATA_BUSY) { 3131 if (status & ATA_BUSY) {
2917 if (time_after(jiffies, ap->pio_task_timeout)) { 3132 if (time_after(jiffies, ap->pio_task_timeout)) {
2918 qc->err_mask |= AC_ERR_ATA_BUS; 3133 qc->err_mask |= AC_ERR_TIMEOUT;
2919 ap->hsm_task_state = HSM_ST_TMOUT; 3134 ap->hsm_task_state = HSM_ST_TMOUT;
2920 return 0; 3135 return 0;
2921 } 3136 }
@@ -2962,7 +3177,7 @@ static int ata_pio_complete (struct ata_port *ap)
2962 } 3177 }
2963 3178
2964 qc = ata_qc_from_tag(ap, ap->active_tag); 3179 qc = ata_qc_from_tag(ap, ap->active_tag);
2965 assert(qc != NULL); 3180 WARN_ON(qc == NULL);
2966 3181
2967 drv_stat = ata_wait_idle(ap); 3182 drv_stat = ata_wait_idle(ap);
2968 if (!ata_ok(drv_stat)) { 3183 if (!ata_ok(drv_stat)) {
@@ -2973,7 +3188,7 @@ static int ata_pio_complete (struct ata_port *ap)
2973 3188
2974 ap->hsm_task_state = HSM_ST_IDLE; 3189 ap->hsm_task_state = HSM_ST_IDLE;
2975 3190
2976 assert(qc->err_mask == 0); 3191 WARN_ON(qc->err_mask);
2977 ata_poll_qc_complete(qc); 3192 ata_poll_qc_complete(qc);
2978 3193
2979 /* another command may start at this point */ 3194 /* another command may start at this point */
@@ -2983,7 +3198,7 @@ static int ata_pio_complete (struct ata_port *ap)
2983 3198
2984 3199
2985/** 3200/**
2986 * swap_buf_le16 - swap halves of 16-words in place 3201 * swap_buf_le16 - swap halves of 16-bit words in place
2987 * @buf: Buffer to swap 3202 * @buf: Buffer to swap
2988 * @buf_words: Number of 16-bit words in buffer. 3203 * @buf_words: Number of 16-bit words in buffer.
2989 * 3204 *
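For reference, the conversion this helper performs on IDENTIFY data is an in-place little-endian-to-host swap, a no-op on little-endian machines (sketch):

/* sketch: what swap_buf_le16() does to an IDENTIFY buffer */
static void swap_buf_le16_sketch(u16 *buf, unsigned int buf_words)
{
#ifdef __BIG_ENDIAN
	unsigned int i;

	for (i = 0; i < buf_words; i++)
		le16_to_cpus(&buf[i]);
#endif
}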
@@ -3293,7 +3508,7 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3293err_out: 3508err_out:
3294 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n", 3509 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
3295 ap->id, dev->devno); 3510 ap->id, dev->devno);
3296 qc->err_mask |= AC_ERR_ATA_BUS; 3511 qc->err_mask |= AC_ERR_HSM;
3297 ap->hsm_task_state = HSM_ST_ERR; 3512 ap->hsm_task_state = HSM_ST_ERR;
3298} 3513}
3299 3514
@@ -3330,7 +3545,7 @@ static void ata_pio_block(struct ata_port *ap)
3330 } 3545 }
3331 3546
3332 qc = ata_qc_from_tag(ap, ap->active_tag); 3547 qc = ata_qc_from_tag(ap, ap->active_tag);
3333 assert(qc != NULL); 3548 WARN_ON(qc == NULL);
3334 3549
3335 /* check error */ 3550 /* check error */
3336 if (status & (ATA_ERR | ATA_DF)) { 3551 if (status & (ATA_ERR | ATA_DF)) {
@@ -3351,7 +3566,7 @@ static void ata_pio_block(struct ata_port *ap)
3351 } else { 3566 } else {
3352 /* handle BSY=0, DRQ=0 as error */ 3567 /* handle BSY=0, DRQ=0 as error */
3353 if ((status & ATA_DRQ) == 0) { 3568 if ((status & ATA_DRQ) == 0) {
3354 qc->err_mask |= AC_ERR_ATA_BUS; 3569 qc->err_mask |= AC_ERR_HSM;
3355 ap->hsm_task_state = HSM_ST_ERR; 3570 ap->hsm_task_state = HSM_ST_ERR;
3356 return; 3571 return;
3357 } 3572 }
@@ -3365,7 +3580,7 @@ static void ata_pio_error(struct ata_port *ap)
3365 struct ata_queued_cmd *qc; 3580 struct ata_queued_cmd *qc;
3366 3581
3367 qc = ata_qc_from_tag(ap, ap->active_tag); 3582 qc = ata_qc_from_tag(ap, ap->active_tag);
3368 assert(qc != NULL); 3583 WARN_ON(qc == NULL);
3369 3584
3370 if (qc->tf.command != ATA_CMD_PACKET) 3585 if (qc->tf.command != ATA_CMD_PACKET)
3371 printk(KERN_WARNING "ata%u: PIO error\n", ap->id); 3586 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
@@ -3373,7 +3588,7 @@ static void ata_pio_error(struct ata_port *ap)
3373 /* make sure qc->err_mask is available to 3588 /* make sure qc->err_mask is available to
3374 * know what's wrong and recover 3589 * know what's wrong and recover
3375 */ 3590 */
3376 assert(qc->err_mask); 3591 WARN_ON(qc->err_mask == 0);
3377 3592
3378 ap->hsm_task_state = HSM_ST_IDLE; 3593 ap->hsm_task_state = HSM_ST_IDLE;
3379 3594
@@ -3414,12 +3629,84 @@ fsm_start:
3414 } 3629 }
3415 3630
3416 if (timeout) 3631 if (timeout)
3417 queue_delayed_work(ata_wq, &ap->pio_task, timeout); 3632 ata_port_queue_task(ap, ata_pio_task, ap, timeout);
3418 else if (!qc_completed) 3633 else if (!qc_completed)
3419 goto fsm_start; 3634 goto fsm_start;
3420} 3635}
3421 3636
3422/** 3637/**
3638 * atapi_packet_task - Write CDB bytes to hardware
3639 * @_data: Port to which ATAPI device is attached.
3640 *
3641 * When device has indicated its readiness to accept
3642 * a CDB, this function is called. Send the CDB.
3643 * If DMA is to be performed, exit immediately.
3644 * Otherwise, we are in polling mode, so poll
 3645 * status until operation succeeds or fails.
3646 *
3647 * LOCKING:
3648 * Kernel thread context (may sleep)
3649 */
3650
3651static void atapi_packet_task(void *_data)
3652{
3653 struct ata_port *ap = _data;
3654 struct ata_queued_cmd *qc;
3655 u8 status;
3656
3657 qc = ata_qc_from_tag(ap, ap->active_tag);
3658 WARN_ON(qc == NULL);
3659 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3660
3661 /* sleep-wait for BSY to clear */
3662 DPRINTK("busy wait\n");
3663 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
3664 qc->err_mask |= AC_ERR_TIMEOUT;
3665 goto err_out;
3666 }
3667
3668 /* make sure DRQ is set */
3669 status = ata_chk_status(ap);
3670 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
3671 qc->err_mask |= AC_ERR_HSM;
3672 goto err_out;
3673 }
3674
3675 /* send SCSI cdb */
3676 DPRINTK("send cdb\n");
3677 WARN_ON(qc->dev->cdb_len < 12);
3678
3679 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
3680 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
3681 unsigned long flags;
3682
3683 /* Once we're done issuing command and kicking bmdma,
3684 * irq handler takes over. To not lose irq, we need
3685 * to clear NOINTR flag before sending cdb, but
3686 * interrupt handler shouldn't be invoked before we're
3687 * finished. Hence, the following locking.
3688 */
3689 spin_lock_irqsave(&ap->host_set->lock, flags);
3690 ap->flags &= ~ATA_FLAG_NOINTR;
3691 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3692 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
3693 ap->ops->bmdma_start(qc); /* initiate bmdma */
3694 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3695 } else {
3696 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3697
3698 /* PIO commands are handled by polling */
3699 ap->hsm_task_state = HSM_ST;
3700 ata_port_queue_task(ap, ata_pio_task, ap, 0);
3701 }
3702
3703 return;
3704
3705err_out:
3706 ata_poll_qc_complete(qc);
3707}
3708
3709/**
3423 * ata_qc_timeout - Handle timeout of queued command 3710 * ata_qc_timeout - Handle timeout of queued command
3424 * @qc: Command that timed out 3711 * @qc: Command that timed out
3425 * 3712 *
@@ -3447,15 +3734,9 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
3447 3734
3448 DPRINTK("ENTER\n"); 3735 DPRINTK("ENTER\n");
3449 3736
3450 spin_lock_irqsave(&host_set->lock, flags); 3737 ap->hsm_task_state = HSM_ST_IDLE;
3451 3738
3452 /* hack alert! We cannot use the supplied completion 3739 spin_lock_irqsave(&host_set->lock, flags);
3453 * function from inside the ->eh_strategy_handler() thread.
3454 * libata is the only user of ->eh_strategy_handler() in
3455 * any kernel, so the default scsi_done() assumes it is
3456 * not being called from the SCSI EH.
3457 */
3458 qc->scsidone = scsi_finish_command;
3459 3740
3460 switch (qc->tf.protocol) { 3741 switch (qc->tf.protocol) {
3461 3742
@@ -3480,12 +3761,13 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
3480 3761
3481 /* complete taskfile transaction */ 3762 /* complete taskfile transaction */
3482 qc->err_mask |= ac_err_mask(drv_stat); 3763 qc->err_mask |= ac_err_mask(drv_stat);
3483 ata_qc_complete(qc);
3484 break; 3764 break;
3485 } 3765 }
3486 3766
3487 spin_unlock_irqrestore(&host_set->lock, flags); 3767 spin_unlock_irqrestore(&host_set->lock, flags);
3488 3768
3769 ata_eh_qc_complete(qc);
3770
3489 DPRINTK("EXIT\n"); 3771 DPRINTK("EXIT\n");
3490} 3772}
3491 3773
@@ -3510,20 +3792,10 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
3510 3792
3511void ata_eng_timeout(struct ata_port *ap) 3793void ata_eng_timeout(struct ata_port *ap)
3512{ 3794{
3513 struct ata_queued_cmd *qc;
3514
3515 DPRINTK("ENTER\n"); 3795 DPRINTK("ENTER\n");
3516 3796
3517 qc = ata_qc_from_tag(ap, ap->active_tag); 3797 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
3518 if (qc)
3519 ata_qc_timeout(qc);
3520 else {
3521 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
3522 ap->id);
3523 goto out;
3524 }
3525 3798
3526out:
3527 DPRINTK("EXIT\n"); 3799 DPRINTK("EXIT\n");
3528} 3800}
3529 3801
@@ -3579,21 +3851,6 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3579 return qc; 3851 return qc;
3580} 3852}
3581 3853
3582static void __ata_qc_complete(struct ata_queued_cmd *qc)
3583{
3584 struct ata_port *ap = qc->ap;
3585 unsigned int tag;
3586
3587 qc->flags = 0;
3588 tag = qc->tag;
3589 if (likely(ata_tag_valid(tag))) {
3590 if (tag == ap->active_tag)
3591 ap->active_tag = ATA_TAG_POISON;
3592 qc->tag = ATA_TAG_POISON;
3593 clear_bit(tag, &ap->qactive);
3594 }
3595}
3596
3597/** 3854/**
3598 * ata_qc_free - free unused ata_queued_cmd 3855 * ata_qc_free - free unused ata_queued_cmd
3599 * @qc: Command to complete 3856 * @qc: Command to complete
@@ -3606,29 +3863,25 @@ static void __ata_qc_complete(struct ata_queued_cmd *qc)
3606 */ 3863 */
3607void ata_qc_free(struct ata_queued_cmd *qc) 3864void ata_qc_free(struct ata_queued_cmd *qc)
3608{ 3865{
3609 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */ 3866 struct ata_port *ap = qc->ap;
3867 unsigned int tag;
3610 3868
3611 __ata_qc_complete(qc); 3869 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3612}
3613 3870
3614/** 3871 qc->flags = 0;
3615 * ata_qc_complete - Complete an active ATA command 3872 tag = qc->tag;
3616 * @qc: Command to complete 3873 if (likely(ata_tag_valid(tag))) {
3617 * @err_mask: ATA Status register contents 3874 if (tag == ap->active_tag)
3618 * 3875 ap->active_tag = ATA_TAG_POISON;
3619 * Indicate to the mid and upper layers that an ATA 3876 qc->tag = ATA_TAG_POISON;
3620 * command has completed, with either an ok or not-ok status. 3877 clear_bit(tag, &ap->qactive);
3621 * 3878 }
3622 * LOCKING: 3879}
3623 * spin_lock_irqsave(host_set lock)
3624 */
3625 3880
3626void ata_qc_complete(struct ata_queued_cmd *qc) 3881void __ata_qc_complete(struct ata_queued_cmd *qc)
3627{ 3882{
3628 int rc; 3883 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3629 3884 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3630 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
3631 assert(qc->flags & ATA_QCFLAG_ACTIVE);
3632 3885
3633 if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) 3886 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
3634 ata_sg_clean(qc); 3887 ata_sg_clean(qc);
@@ -3640,17 +3893,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
3640 qc->flags &= ~ATA_QCFLAG_ACTIVE; 3893 qc->flags &= ~ATA_QCFLAG_ACTIVE;
3641 3894
3642 /* call completion callback */ 3895 /* call completion callback */
3643 rc = qc->complete_fn(qc); 3896 qc->complete_fn(qc);
3644
3645 /* if callback indicates not to complete command (non-zero),
3646 * return immediately
3647 */
3648 if (rc != 0)
3649 return;
3650
3651 __ata_qc_complete(qc);
3652
3653 VPRINTK("EXIT\n");
3654} 3897}
3655 3898
3656static inline int ata_should_dma_map(struct ata_queued_cmd *qc) 3899static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
@@ -3690,20 +3933,20 @@ static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3690 * spin_lock_irqsave(host_set lock) 3933 * spin_lock_irqsave(host_set lock)
3691 * 3934 *
3692 * RETURNS: 3935 * RETURNS:
3693 * Zero on success, negative on error. 3936 * Zero on success, AC_ERR_* mask on failure
3694 */ 3937 */
3695 3938
3696int ata_qc_issue(struct ata_queued_cmd *qc) 3939unsigned int ata_qc_issue(struct ata_queued_cmd *qc)
3697{ 3940{
3698 struct ata_port *ap = qc->ap; 3941 struct ata_port *ap = qc->ap;
3699 3942
3700 if (ata_should_dma_map(qc)) { 3943 if (ata_should_dma_map(qc)) {
3701 if (qc->flags & ATA_QCFLAG_SG) { 3944 if (qc->flags & ATA_QCFLAG_SG) {
3702 if (ata_sg_setup(qc)) 3945 if (ata_sg_setup(qc))
3703 goto err_out; 3946 goto sg_err;
3704 } else if (qc->flags & ATA_QCFLAG_SINGLE) { 3947 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
3705 if (ata_sg_setup_one(qc)) 3948 if (ata_sg_setup_one(qc))
3706 goto err_out; 3949 goto sg_err;
3707 } 3950 }
3708 } else { 3951 } else {
3709 qc->flags &= ~ATA_QCFLAG_DMAMAP; 3952 qc->flags &= ~ATA_QCFLAG_DMAMAP;
@@ -3716,8 +3959,9 @@ int ata_qc_issue(struct ata_queued_cmd *qc)
3716 3959
3717 return ap->ops->qc_issue(qc); 3960 return ap->ops->qc_issue(qc);
3718 3961
3719err_out: 3962sg_err:
3720 return -1; 3963 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3964 return AC_ERR_SYSTEM;
3721} 3965}
3722 3966
3723 3967
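Callers of ata_qc_issue() now treat a non-zero return as an AC_ERR_* mask rather than a negative errno. A typical issue path is therefore expected to look roughly like this (sketch; the completion call is an assumption about the surrounding error path):

/* sketch: consuming the AC_ERR_* style return from ata_qc_issue() */
static void foo_issue(struct ata_queued_cmd *qc)
{
	unsigned int err_mask = ata_qc_issue(qc);

	if (err_mask) {
		qc->err_mask |= err_mask;	/* record why the issue failed */
		ata_qc_complete(qc);		/* assumed completion path */
	}
}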
@@ -3736,10 +3980,10 @@ err_out:
3736 * spin_lock_irqsave(host_set lock) 3980 * spin_lock_irqsave(host_set lock)
3737 * 3981 *
3738 * RETURNS: 3982 * RETURNS:
3739 * Zero on success, negative on error. 3983 * Zero on success, AC_ERR_* mask on failure
3740 */ 3984 */
3741 3985
3742int ata_qc_issue_prot(struct ata_queued_cmd *qc) 3986unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
3743{ 3987{
3744 struct ata_port *ap = qc->ap; 3988 struct ata_port *ap = qc->ap;
3745 3989
@@ -3760,31 +4004,31 @@ int ata_qc_issue_prot(struct ata_queued_cmd *qc)
3760 ata_qc_set_polling(qc); 4004 ata_qc_set_polling(qc);
3761 ata_tf_to_host(ap, &qc->tf); 4005 ata_tf_to_host(ap, &qc->tf);
3762 ap->hsm_task_state = HSM_ST; 4006 ap->hsm_task_state = HSM_ST;
3763 queue_work(ata_wq, &ap->pio_task); 4007 ata_port_queue_task(ap, ata_pio_task, ap, 0);
3764 break; 4008 break;
3765 4009
3766 case ATA_PROT_ATAPI: 4010 case ATA_PROT_ATAPI:
3767 ata_qc_set_polling(qc); 4011 ata_qc_set_polling(qc);
3768 ata_tf_to_host(ap, &qc->tf); 4012 ata_tf_to_host(ap, &qc->tf);
3769 queue_work(ata_wq, &ap->packet_task); 4013 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
3770 break; 4014 break;
3771 4015
3772 case ATA_PROT_ATAPI_NODATA: 4016 case ATA_PROT_ATAPI_NODATA:
3773 ap->flags |= ATA_FLAG_NOINTR; 4017 ap->flags |= ATA_FLAG_NOINTR;
3774 ata_tf_to_host(ap, &qc->tf); 4018 ata_tf_to_host(ap, &qc->tf);
3775 queue_work(ata_wq, &ap->packet_task); 4019 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
3776 break; 4020 break;
3777 4021
3778 case ATA_PROT_ATAPI_DMA: 4022 case ATA_PROT_ATAPI_DMA:
3779 ap->flags |= ATA_FLAG_NOINTR; 4023 ap->flags |= ATA_FLAG_NOINTR;
3780 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ 4024 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
3781 ap->ops->bmdma_setup(qc); /* set up bmdma */ 4025 ap->ops->bmdma_setup(qc); /* set up bmdma */
3782 queue_work(ata_wq, &ap->packet_task); 4026 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
3783 break; 4027 break;
3784 4028
3785 default: 4029 default:
3786 WARN_ON(1); 4030 WARN_ON(1);
3787 return -1; 4031 return AC_ERR_SYSTEM;
3788 } 4032 }
3789 4033
3790 return 0; 4034 return 0;
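ata_port_queue_task() itself is outside this hunk; the calls above assume a helper that (re)arms ap->port_task with the given function and schedules it on the libata workqueue, with an optional delay. A sketch under that assumption:

/* sketch: the assumed shape of ata_port_queue_task() */
static void port_queue_task_sketch(struct ata_port *ap, void (*fn)(void *),
				   void *data, unsigned long delay)
{
	PREPARE_WORK(&ap->port_task, fn, data);

	if (!delay)
		queue_work(ata_wq, &ap->port_task);
	else
		queue_delayed_work(ata_wq, &ap->port_task, delay);
}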
@@ -4147,91 +4391,6 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4147 return IRQ_RETVAL(handled); 4391 return IRQ_RETVAL(handled);
4148} 4392}
4149 4393
4150/**
4151 * atapi_packet_task - Write CDB bytes to hardware
4152 * @_data: Port to which ATAPI device is attached.
4153 *
4154 * When device has indicated its readiness to accept
4155 * a CDB, this function is called. Send the CDB.
4156 * If DMA is to be performed, exit immediately.
4157 * Otherwise, we are in polling mode, so poll
4158 * status under operation succeeds or fails.
4159 *
4160 * LOCKING:
4161 * Kernel thread context (may sleep)
4162 */
4163
4164static void atapi_packet_task(void *_data)
4165{
4166 struct ata_port *ap = _data;
4167 struct ata_queued_cmd *qc;
4168 u8 status;
4169
4170 qc = ata_qc_from_tag(ap, ap->active_tag);
4171 assert(qc != NULL);
4172 assert(qc->flags & ATA_QCFLAG_ACTIVE);
4173
4174 /* sleep-wait for BSY to clear */
4175 DPRINTK("busy wait\n");
4176 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
4177 qc->err_mask |= AC_ERR_ATA_BUS;
4178 goto err_out;
4179 }
4180
4181 /* make sure DRQ is set */
4182 status = ata_chk_status(ap);
4183 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
4184 qc->err_mask |= AC_ERR_ATA_BUS;
4185 goto err_out;
4186 }
4187
4188 /* send SCSI cdb */
4189 DPRINTK("send cdb\n");
4190 assert(ap->cdb_len >= 12);
4191
4192 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
4193 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
4194 unsigned long flags;
4195
4196 /* Once we're done issuing command and kicking bmdma,
4197 * irq handler takes over. To not lose irq, we need
4198 * to clear NOINTR flag before sending cdb, but
4199 * interrupt handler shouldn't be invoked before we're
4200 * finished. Hence, the following locking.
4201 */
4202 spin_lock_irqsave(&ap->host_set->lock, flags);
4203 ap->flags &= ~ATA_FLAG_NOINTR;
4204 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
4205 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
4206 ap->ops->bmdma_start(qc); /* initiate bmdma */
4207 spin_unlock_irqrestore(&ap->host_set->lock, flags);
4208 } else {
4209 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
4210
4211 /* PIO commands are handled by polling */
4212 ap->hsm_task_state = HSM_ST;
4213 queue_work(ata_wq, &ap->pio_task);
4214 }
4215
4216 return;
4217
4218err_out:
4219 ata_poll_qc_complete(qc);
4220}
4221
4222
4223/**
4224 * ata_port_start - Set port up for dma.
4225 * @ap: Port to initialize
4226 *
4227 * Called just after data structures for each port are
4228 * initialized. Allocates space for PRD table.
4229 *
4230 * May be used as the port_start() entry in ata_port_operations.
4231 *
4232 * LOCKING:
4233 * Inherited from caller.
4234 */
4235 4394
4236/* 4395/*
4237 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself, 4396 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself,
@@ -4284,6 +4443,8 @@ static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4284 4443
4285/** 4444/**
 4286 * ata_device_resume - wake up a previously suspended device 4445
4446 * @ap: port the device is connected to
4447 * @dev: the device to resume
4287 * 4448 *
4288 * Kick the drive back into action, by sending it an idle immediate 4449 * Kick the drive back into action, by sending it an idle immediate
4289 * command and making sure its transfer mode matches between drive 4450 * command and making sure its transfer mode matches between drive
@@ -4306,10 +4467,11 @@ int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
4306 4467
4307/** 4468/**
4308 * ata_device_suspend - prepare a device for suspend 4469 * ata_device_suspend - prepare a device for suspend
4470 * @ap: port the device is connected to
4471 * @dev: the device to suspend
4309 * 4472 *
4310 * Flush the cache on the drive, if appropriate, then issue a 4473 * Flush the cache on the drive, if appropriate, then issue a
4311 * standbynow command. 4474 * standbynow command.
4312 *
4313 */ 4475 */
4314int ata_device_suspend(struct ata_port *ap, struct ata_device *dev) 4476int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
4315{ 4477{
@@ -4323,6 +4485,19 @@ int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
4323 return 0; 4485 return 0;
4324} 4486}
4325 4487
4488/**
4489 * ata_port_start - Set port up for dma.
4490 * @ap: Port to initialize
4491 *
4492 * Called just after data structures for each port are
4493 * initialized. Allocates space for PRD table.
4494 *
4495 * May be used as the port_start() entry in ata_port_operations.
4496 *
4497 * LOCKING:
4498 * Inherited from caller.
4499 */
4500
4326int ata_port_start (struct ata_port *ap) 4501int ata_port_start (struct ata_port *ap)
4327{ 4502{
4328 struct device *dev = ap->host_set->dev; 4503 struct device *dev = ap->host_set->dev;
@@ -4436,8 +4611,8 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4436 ap->active_tag = ATA_TAG_POISON; 4611 ap->active_tag = ATA_TAG_POISON;
4437 ap->last_ctl = 0xFF; 4612 ap->last_ctl = 0xFF;
4438 4613
4439 INIT_WORK(&ap->packet_task, atapi_packet_task, ap); 4614 INIT_WORK(&ap->port_task, NULL, NULL);
4440 INIT_WORK(&ap->pio_task, ata_pio_task, ap); 4615 INIT_LIST_HEAD(&ap->eh_done_q);
4441 4616
4442 for (i = 0; i < ATA_MAX_DEVICES; i++) 4617 for (i = 0; i < ATA_MAX_DEVICES; i++)
4443 ap->device[i].devno = i; 4618 ap->device[i].devno = i;
@@ -4579,9 +4754,9 @@ int ata_device_add(const struct ata_probe_ent *ent)
4579 4754
4580 ap = host_set->ports[i]; 4755 ap = host_set->ports[i];
4581 4756
4582 DPRINTK("ata%u: probe begin\n", ap->id); 4757 DPRINTK("ata%u: bus probe begin\n", ap->id);
4583 rc = ata_bus_probe(ap); 4758 rc = ata_bus_probe(ap);
4584 DPRINTK("ata%u: probe end\n", ap->id); 4759 DPRINTK("ata%u: bus probe end\n", ap->id);
4585 4760
4586 if (rc) { 4761 if (rc) {
4587 /* FIXME: do something useful here? 4762 /* FIXME: do something useful here?
@@ -4605,7 +4780,7 @@ int ata_device_add(const struct ata_probe_ent *ent)
4605 } 4780 }
4606 4781
4607 /* probes are done, now scan each port's disk(s) */ 4782 /* probes are done, now scan each port's disk(s) */
4608 DPRINTK("probe begin\n"); 4783 DPRINTK("host probe begin\n");
4609 for (i = 0; i < count; i++) { 4784 for (i = 0; i < count; i++) {
4610 struct ata_port *ap = host_set->ports[i]; 4785 struct ata_port *ap = host_set->ports[i];
4611 4786
@@ -4691,11 +4866,14 @@ void ata_host_set_remove(struct ata_host_set *host_set)
4691int ata_scsi_release(struct Scsi_Host *host) 4866int ata_scsi_release(struct Scsi_Host *host)
4692{ 4867{
4693 struct ata_port *ap = (struct ata_port *) &host->hostdata[0]; 4868 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
4869 int i;
4694 4870
4695 DPRINTK("ENTER\n"); 4871 DPRINTK("ENTER\n");
4696 4872
4697 ap->ops->port_disable(ap); 4873 ap->ops->port_disable(ap);
4698 ata_host_remove(ap, 0); 4874 ata_host_remove(ap, 0);
4875 for (i = 0; i < ATA_MAX_DEVICES; i++)
4876 kfree(ap->device[i].id);
4699 4877
4700 DPRINTK("EXIT\n"); 4878 DPRINTK("EXIT\n");
4701 return 1; 4879 return 1;
@@ -4727,32 +4905,6 @@ void ata_std_ports(struct ata_ioports *ioaddr)
4727 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD; 4905 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
4728} 4906}
4729 4907
4730static struct ata_probe_ent *
4731ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
4732{
4733 struct ata_probe_ent *probe_ent;
4734
4735 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
4736 if (!probe_ent) {
4737 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
4738 kobject_name(&(dev->kobj)));
4739 return NULL;
4740 }
4741
4742 INIT_LIST_HEAD(&probe_ent->node);
4743 probe_ent->dev = dev;
4744
4745 probe_ent->sht = port->sht;
4746 probe_ent->host_flags = port->host_flags;
4747 probe_ent->pio_mask = port->pio_mask;
4748 probe_ent->mwdma_mask = port->mwdma_mask;
4749 probe_ent->udma_mask = port->udma_mask;
4750 probe_ent->port_ops = port->port_ops;
4751
4752 return probe_ent;
4753}
4754
4755
4756 4908
4757#ifdef CONFIG_PCI 4909#ifdef CONFIG_PCI
4758 4910
@@ -4764,256 +4916,6 @@ void ata_pci_host_stop (struct ata_host_set *host_set)
4764} 4916}
4765 4917
4766/** 4918/**
4767 * ata_pci_init_native_mode - Initialize native-mode driver
4768 * @pdev: pci device to be initialized
4769 * @port: array[2] of pointers to port info structures.
4770 * @ports: bitmap of ports present
4771 *
4772 * Utility function which allocates and initializes an
4773 * ata_probe_ent structure for a standard dual-port
4774 * PIO-based IDE controller. The returned ata_probe_ent
4775 * structure can be passed to ata_device_add(). The returned
4776 * ata_probe_ent structure should then be freed with kfree().
4777 *
4778 * The caller need only pass the address of the primary port, the
4779 * secondary will be deduced automatically. If the device has non
4780 * standard secondary port mappings this function can be called twice,
4781 * once for each interface.
4782 */
4783
4784struct ata_probe_ent *
4785ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
4786{
4787 struct ata_probe_ent *probe_ent =
4788 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
4789 int p = 0;
4790
4791 if (!probe_ent)
4792 return NULL;
4793
4794 probe_ent->irq = pdev->irq;
4795 probe_ent->irq_flags = SA_SHIRQ;
4796 probe_ent->private_data = port[0]->private_data;
4797
4798 if (ports & ATA_PORT_PRIMARY) {
4799 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
4800 probe_ent->port[p].altstatus_addr =
4801 probe_ent->port[p].ctl_addr =
4802 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
4803 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4);
4804 ata_std_ports(&probe_ent->port[p]);
4805 p++;
4806 }
4807
4808 if (ports & ATA_PORT_SECONDARY) {
4809 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
4810 probe_ent->port[p].altstatus_addr =
4811 probe_ent->port[p].ctl_addr =
4812 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
4813 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4) + 8;
4814 ata_std_ports(&probe_ent->port[p]);
4815 p++;
4816 }
4817
4818 probe_ent->n_ports = p;
4819 return probe_ent;
4820}
4821
4822static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, struct ata_port_info *port, int port_num)
4823{
4824 struct ata_probe_ent *probe_ent;
4825
4826 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port);
4827 if (!probe_ent)
4828 return NULL;
4829
4830 probe_ent->legacy_mode = 1;
4831 probe_ent->n_ports = 1;
4832 probe_ent->hard_port_no = port_num;
4833 probe_ent->private_data = port->private_data;
4834
4835 switch(port_num)
4836 {
4837 case 0:
4838 probe_ent->irq = 14;
4839 probe_ent->port[0].cmd_addr = 0x1f0;
4840 probe_ent->port[0].altstatus_addr =
4841 probe_ent->port[0].ctl_addr = 0x3f6;
4842 break;
4843 case 1:
4844 probe_ent->irq = 15;
4845 probe_ent->port[0].cmd_addr = 0x170;
4846 probe_ent->port[0].altstatus_addr =
4847 probe_ent->port[0].ctl_addr = 0x376;
4848 break;
4849 }
4850 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4) + 8 * port_num;
4851 ata_std_ports(&probe_ent->port[0]);
4852 return probe_ent;
4853}
4854
4855/**
4856 * ata_pci_init_one - Initialize/register PCI IDE host controller
4857 * @pdev: Controller to be initialized
4858 * @port_info: Information from low-level host driver
4859 * @n_ports: Number of ports attached to host controller
4860 *
4861 * This is a helper function which can be called from a driver's
4862 * xxx_init_one() probe function if the hardware uses traditional
4863 * IDE taskfile registers.
4864 *
4865 * This function calls pci_enable_device(), reserves its register
4866 * regions, sets the dma mask, enables bus master mode, and calls
4867 * ata_device_add()
4868 *
4869 * LOCKING:
4870 * Inherited from PCI layer (may sleep).
4871 *
4872 * RETURNS:
4873 * Zero on success, negative on errno-based value on error.
4874 */
4875
4876int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4877 unsigned int n_ports)
4878{
4879 struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
4880 struct ata_port_info *port[2];
4881 u8 tmp8, mask;
4882 unsigned int legacy_mode = 0;
4883 int disable_dev_on_err = 1;
4884 int rc;
4885
4886 DPRINTK("ENTER\n");
4887
4888 port[0] = port_info[0];
4889 if (n_ports > 1)
4890 port[1] = port_info[1];
4891 else
4892 port[1] = port[0];
4893
4894 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
4895 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
4896 /* TODO: What if one channel is in native mode ... */
4897 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
4898 mask = (1 << 2) | (1 << 0);
4899 if ((tmp8 & mask) != mask)
4900 legacy_mode = (1 << 3);
4901 }
4902
4903 /* FIXME... */
4904 if ((!legacy_mode) && (n_ports > 2)) {
4905 printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
4906 n_ports = 2;
4907 /* For now */
4908 }
4909
4910 /* FIXME: Really for ATA it isn't safe because the device may be
4911 multi-purpose and we want to leave it alone if it was already
4912 enabled. Secondly for shared use as Arjan says we want refcounting
4913
4914 Checking dev->is_enabled is insufficient as this is not set at
4915 boot for the primary video which is BIOS enabled
4916 */
4917
4918 rc = pci_enable_device(pdev);
4919 if (rc)
4920 return rc;
4921
4922 rc = pci_request_regions(pdev, DRV_NAME);
4923 if (rc) {
4924 disable_dev_on_err = 0;
4925 goto err_out;
4926 }
4927
4928 /* FIXME: Should use platform specific mappers for legacy port ranges */
4929 if (legacy_mode) {
4930 if (!request_region(0x1f0, 8, "libata")) {
4931 struct resource *conflict, res;
4932 res.start = 0x1f0;
4933 res.end = 0x1f0 + 8 - 1;
4934 conflict = ____request_resource(&ioport_resource, &res);
4935 if (!strcmp(conflict->name, "libata"))
4936 legacy_mode |= (1 << 0);
4937 else {
4938 disable_dev_on_err = 0;
4939 printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
4940 }
4941 } else
4942 legacy_mode |= (1 << 0);
4943
4944 if (!request_region(0x170, 8, "libata")) {
4945 struct resource *conflict, res;
4946 res.start = 0x170;
4947 res.end = 0x170 + 8 - 1;
4948 conflict = ____request_resource(&ioport_resource, &res);
4949 if (!strcmp(conflict->name, "libata"))
4950 legacy_mode |= (1 << 1);
4951 else {
4952 disable_dev_on_err = 0;
4953 printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
4954 }
4955 } else
4956 legacy_mode |= (1 << 1);
4957 }
4958
4959 /* we have legacy mode, but all ports are unavailable */
4960 if (legacy_mode == (1 << 3)) {
4961 rc = -EBUSY;
4962 goto err_out_regions;
4963 }
4964
4965 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
4966 if (rc)
4967 goto err_out_regions;
4968 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
4969 if (rc)
4970 goto err_out_regions;
4971
4972 if (legacy_mode) {
4973 if (legacy_mode & (1 << 0))
4974 probe_ent = ata_pci_init_legacy_port(pdev, port[0], 0);
4975 if (legacy_mode & (1 << 1))
4976 probe_ent2 = ata_pci_init_legacy_port(pdev, port[1], 1);
4977 } else {
4978 if (n_ports == 2)
4979 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
4980 else
4981 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
4982 }
4983 if (!probe_ent && !probe_ent2) {
4984 rc = -ENOMEM;
4985 goto err_out_regions;
4986 }
4987
4988 pci_set_master(pdev);
4989
4990 /* FIXME: check ata_device_add return */
4991 if (legacy_mode) {
4992 if (legacy_mode & (1 << 0))
4993 ata_device_add(probe_ent);
4994 if (legacy_mode & (1 << 1))
4995 ata_device_add(probe_ent2);
4996 } else
4997 ata_device_add(probe_ent);
4998
4999 kfree(probe_ent);
5000 kfree(probe_ent2);
5001
5002 return 0;
5003
5004err_out_regions:
5005 if (legacy_mode & (1 << 0))
5006 release_region(0x1f0, 8);
5007 if (legacy_mode & (1 << 1))
5008 release_region(0x170, 8);
5009 pci_release_regions(pdev);
5010err_out:
5011 if (disable_dev_on_err)
5012 pci_disable_device(pdev);
5013 return rc;
5014}
5015
5016/**
5017 * ata_pci_remove_one - PCI layer callback for device removal 4919 * ata_pci_remove_one - PCI layer callback for device removal
5018 * @pdev: PCI device that was removed 4920 * @pdev: PCI device that was removed
5019 * 4921 *
@@ -5143,7 +5045,7 @@ EXPORT_SYMBOL_GPL(ata_device_add);
5143EXPORT_SYMBOL_GPL(ata_host_set_remove); 5045EXPORT_SYMBOL_GPL(ata_host_set_remove);
5144EXPORT_SYMBOL_GPL(ata_sg_init); 5046EXPORT_SYMBOL_GPL(ata_sg_init);
5145EXPORT_SYMBOL_GPL(ata_sg_init_one); 5047EXPORT_SYMBOL_GPL(ata_sg_init_one);
5146EXPORT_SYMBOL_GPL(ata_qc_complete); 5048EXPORT_SYMBOL_GPL(__ata_qc_complete);
5147EXPORT_SYMBOL_GPL(ata_qc_issue_prot); 5049EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
5148EXPORT_SYMBOL_GPL(ata_eng_timeout); 5050EXPORT_SYMBOL_GPL(ata_eng_timeout);
5149EXPORT_SYMBOL_GPL(ata_tf_load); 5051EXPORT_SYMBOL_GPL(ata_tf_load);
@@ -5169,18 +5071,30 @@ EXPORT_SYMBOL_GPL(ata_port_probe);
5169EXPORT_SYMBOL_GPL(sata_phy_reset); 5071EXPORT_SYMBOL_GPL(sata_phy_reset);
5170EXPORT_SYMBOL_GPL(__sata_phy_reset); 5072EXPORT_SYMBOL_GPL(__sata_phy_reset);
5171EXPORT_SYMBOL_GPL(ata_bus_reset); 5073EXPORT_SYMBOL_GPL(ata_bus_reset);
5074EXPORT_SYMBOL_GPL(ata_std_probeinit);
5075EXPORT_SYMBOL_GPL(ata_std_softreset);
5076EXPORT_SYMBOL_GPL(sata_std_hardreset);
5077EXPORT_SYMBOL_GPL(ata_std_postreset);
5078EXPORT_SYMBOL_GPL(ata_std_probe_reset);
5079EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
5080EXPORT_SYMBOL_GPL(ata_dev_revalidate);
5172EXPORT_SYMBOL_GPL(ata_port_disable); 5081EXPORT_SYMBOL_GPL(ata_port_disable);
5173EXPORT_SYMBOL_GPL(ata_ratelimit); 5082EXPORT_SYMBOL_GPL(ata_ratelimit);
5083EXPORT_SYMBOL_GPL(ata_busy_sleep);
5084EXPORT_SYMBOL_GPL(ata_port_queue_task);
5174EXPORT_SYMBOL_GPL(ata_scsi_ioctl); 5085EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
5175EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 5086EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
5087EXPORT_SYMBOL_GPL(ata_scsi_timed_out);
5176EXPORT_SYMBOL_GPL(ata_scsi_error); 5088EXPORT_SYMBOL_GPL(ata_scsi_error);
5177EXPORT_SYMBOL_GPL(ata_scsi_slave_config); 5089EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
5178EXPORT_SYMBOL_GPL(ata_scsi_release); 5090EXPORT_SYMBOL_GPL(ata_scsi_release);
5179EXPORT_SYMBOL_GPL(ata_host_intr); 5091EXPORT_SYMBOL_GPL(ata_host_intr);
5180EXPORT_SYMBOL_GPL(ata_dev_classify); 5092EXPORT_SYMBOL_GPL(ata_dev_classify);
5181EXPORT_SYMBOL_GPL(ata_dev_id_string); 5093EXPORT_SYMBOL_GPL(ata_id_string);
5182EXPORT_SYMBOL_GPL(ata_dev_config); 5094EXPORT_SYMBOL_GPL(ata_id_c_string);
5183EXPORT_SYMBOL_GPL(ata_scsi_simulate); 5095EXPORT_SYMBOL_GPL(ata_scsi_simulate);
5096EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
5097EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
5184 5098
5185EXPORT_SYMBOL_GPL(ata_pio_need_iordy); 5099EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
5186EXPORT_SYMBOL_GPL(ata_timing_compute); 5100EXPORT_SYMBOL_GPL(ata_timing_compute);
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 59503c9ccac9..ccedb4536977 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -151,7 +151,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
151 struct scsi_sense_hdr sshdr; 151 struct scsi_sense_hdr sshdr;
152 enum dma_data_direction data_dir; 152 enum dma_data_direction data_dir;
153 153
154 if (NULL == (void *)arg) 154 if (arg == NULL)
155 return -EINVAL; 155 return -EINVAL;
156 156
157 if (copy_from_user(args, arg, sizeof(args))) 157 if (copy_from_user(args, arg, sizeof(args)))
@@ -201,7 +201,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
201 /* Need code to retrieve data from check condition? */ 201 /* Need code to retrieve data from check condition? */
202 202
203 if ((argbuf) 203 if ((argbuf)
204 && copy_to_user((void *)(arg + sizeof(args)), argbuf, argsize)) 204 && copy_to_user(arg + sizeof(args), argbuf, argsize))
205 rc = -EFAULT; 205 rc = -EFAULT;
206error: 206error:
207 if (argbuf) 207 if (argbuf)
@@ -228,7 +228,7 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
228 u8 args[7]; 228 u8 args[7];
229 struct scsi_sense_hdr sshdr; 229 struct scsi_sense_hdr sshdr;
230 230
231 if (NULL == (void *)arg) 231 if (arg == NULL)
232 return -EINVAL; 232 return -EINVAL;
233 233
234 if (copy_from_user(args, arg, sizeof(args))) 234 if (copy_from_user(args, arg, sizeof(args)))
@@ -553,7 +553,7 @@ void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
553 /* 553 /*
554 * Read the controller registers. 554 * Read the controller registers.
555 */ 555 */
556 assert(NULL != qc->ap->ops->tf_read); 556 WARN_ON(qc->ap->ops->tf_read == NULL);
557 qc->ap->ops->tf_read(qc->ap, tf); 557 qc->ap->ops->tf_read(qc->ap, tf);
558 558
559 /* 559 /*
@@ -628,7 +628,7 @@ void ata_gen_fixed_sense(struct ata_queued_cmd *qc)
628 /* 628 /*
629 * Read the controller registers. 629 * Read the controller registers.
630 */ 630 */
631 assert(NULL != qc->ap->ops->tf_read); 631 WARN_ON(qc->ap->ops->tf_read == NULL);
632 qc->ap->ops->tf_read(qc->ap, tf); 632 qc->ap->ops->tf_read(qc->ap, tf);
633 633
634 /* 634 /*
@@ -684,23 +684,23 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
684 if (sdev->id < ATA_MAX_DEVICES) { 684 if (sdev->id < ATA_MAX_DEVICES) {
685 struct ata_port *ap; 685 struct ata_port *ap;
686 struct ata_device *dev; 686 struct ata_device *dev;
687 unsigned int max_sectors;
687 688
688 ap = (struct ata_port *) &sdev->host->hostdata[0]; 689 ap = (struct ata_port *) &sdev->host->hostdata[0];
689 dev = &ap->device[sdev->id]; 690 dev = &ap->device[sdev->id];
690 691
691 /* TODO: 1024 is an arbitrary number, not the 692 /* TODO: 2048 is an arbitrary number, not the
692 * hardware maximum. This should be increased to 693 * hardware maximum. This should be increased to
693 * 65534 when Jens Axboe's patch for dynamically 694 * 65534 when Jens Axboe's patch for dynamically
694 * determining max_sectors is merged. 695 * determining max_sectors is merged.
695 */ 696 */
696 if ((dev->flags & ATA_DFLAG_LBA48) && 697 max_sectors = ATA_MAX_SECTORS;
697 ((dev->flags & ATA_DFLAG_LOCK_SECTORS) == 0)) { 698 if (dev->flags & ATA_DFLAG_LBA48)
698 /* 699 max_sectors = 2048;
699 * do not overwrite sdev->host->max_sectors, since 700 if (dev->max_sectors)
700 * other drives on this host may not support LBA48 701 max_sectors = dev->max_sectors;
701 */ 702
702 blk_queue_max_sectors(sdev->request_queue, 2048); 703 blk_queue_max_sectors(sdev->request_queue, max_sectors);
703 }
704 704
705 /* 705 /*
706 * SATA DMA transfers must be multiples of 4 byte, so 706 * SATA DMA transfers must be multiples of 4 byte, so
@@ -717,6 +717,47 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
717} 717}
718 718
719/** 719/**
720 * ata_scsi_timed_out - SCSI layer time out callback
721 * @cmd: timed out SCSI command
722 *
723 * Handles SCSI layer timeout. We race with normal completion of
724 * the qc for @cmd. If the qc is already gone, we lose and let
725 * the scsi command finish (EH_HANDLED). Otherwise, the qc has
726 * timed out and EH should be invoked. Prevent ata_qc_complete()
727 * from finishing it by setting EH_SCHEDULED and return
728 * EH_NOT_HANDLED.
729 *
730 * LOCKING:
731 * Called from timer context
732 *
733 * RETURNS:
734 * EH_HANDLED or EH_NOT_HANDLED
735 */
736enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
737{
738 struct Scsi_Host *host = cmd->device->host;
739 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
740 unsigned long flags;
741 struct ata_queued_cmd *qc;
742 enum scsi_eh_timer_return ret = EH_HANDLED;
743
744 DPRINTK("ENTER\n");
745
746 spin_lock_irqsave(&ap->host_set->lock, flags);
747 qc = ata_qc_from_tag(ap, ap->active_tag);
748 if (qc) {
749 WARN_ON(qc->scsicmd != cmd);
750 qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
751 qc->err_mask |= AC_ERR_TIMEOUT;
752 ret = EH_NOT_HANDLED;
753 }
754 spin_unlock_irqrestore(&ap->host_set->lock, flags);
755
756 DPRINTK("EXIT, ret=%d\n", ret);
757 return ret;
758}
759
760/**
720 * ata_scsi_error - SCSI layer error handler callback 761 * ata_scsi_error - SCSI layer error handler callback
721 * @host: SCSI host on which error occurred 762 * @host: SCSI host on which error occurred
722 * 763 *
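For context, a minimal sketch (not part of the patch) of how a low-level driver's scsi_host_template is expected to pick up the new timeout hook; the fields mirror the templates touched later in this series, and the "example" names are placeholders:

static struct scsi_host_template example_sht = {
	.module			= THIS_MODULE,
	.name			= "example",
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.eh_timed_out		= ata_scsi_timed_out,	/* new: marks the qc, defers to EH */
	.eh_strategy_handler	= ata_scsi_error,	/* flushes port task, runs ->eng_timeout */
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.slave_configure	= ata_scsi_slave_config,
};

Note that .max_sectors is intentionally absent: per-device limits are now applied in ata_scsi_slave_config() via blk_queue_max_sectors(), which is why the driver templates below drop the field.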
@@ -732,23 +773,84 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
732int ata_scsi_error(struct Scsi_Host *host) 773int ata_scsi_error(struct Scsi_Host *host)
733{ 774{
734 struct ata_port *ap; 775 struct ata_port *ap;
776 unsigned long flags;
735 777
736 DPRINTK("ENTER\n"); 778 DPRINTK("ENTER\n");
737 779
738 ap = (struct ata_port *) &host->hostdata[0]; 780 ap = (struct ata_port *) &host->hostdata[0];
781
782 spin_lock_irqsave(&ap->host_set->lock, flags);
783 WARN_ON(ap->flags & ATA_FLAG_IN_EH);
784 ap->flags |= ATA_FLAG_IN_EH;
785 WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
786 spin_unlock_irqrestore(&ap->host_set->lock, flags);
787
788 ata_port_flush_task(ap);
789
739 ap->ops->eng_timeout(ap); 790 ap->ops->eng_timeout(ap);
740 791
741 /* TODO: this is per-command; when queueing is supported 792 WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
742 * this code will either change or move to a more 793
743 * appropriate place 794 scsi_eh_flush_done_q(&ap->eh_done_q);
744 */ 795
745 host->host_failed--; 796 spin_lock_irqsave(&ap->host_set->lock, flags);
746 INIT_LIST_HEAD(&host->eh_cmd_q); 797 ap->flags &= ~ATA_FLAG_IN_EH;
798 spin_unlock_irqrestore(&ap->host_set->lock, flags);
747 799
748 DPRINTK("EXIT\n"); 800 DPRINTK("EXIT\n");
749 return 0; 801 return 0;
750} 802}
751 803
804static void ata_eh_scsidone(struct scsi_cmnd *scmd)
805{
806 /* nada */
807}
808
809static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
810{
811 struct ata_port *ap = qc->ap;
812 struct scsi_cmnd *scmd = qc->scsicmd;
813 unsigned long flags;
814
815 spin_lock_irqsave(&ap->host_set->lock, flags);
816 qc->scsidone = ata_eh_scsidone;
817 __ata_qc_complete(qc);
818 WARN_ON(ata_tag_valid(qc->tag));
819 spin_unlock_irqrestore(&ap->host_set->lock, flags);
820
821 scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
822}
823
824/**
825 * ata_eh_qc_complete - Complete an active ATA command from EH
826 * @qc: Command to complete
827 *
828 * Indicate to the mid and upper layers that an ATA command has
829 * completed. To be used from EH.
830 */
831void ata_eh_qc_complete(struct ata_queued_cmd *qc)
832{
833 struct scsi_cmnd *scmd = qc->scsicmd;
834 scmd->retries = scmd->allowed;
835 __ata_eh_qc_complete(qc);
836}
837
838/**
839 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
840 * @qc: Command to retry
841 *
842 * Indicate to the mid and upper layers that an ATA command
843 * should be retried. To be used from EH.
844 *
845 * SCSI midlayer limits the number of retries to scmd->allowed.
846 * This function might need to adjust scmd->retries for commands
847 * which get retried due to unrelated NCQ failures.
848 */
849void ata_eh_qc_retry(struct ata_queued_cmd *qc)
850{
851 __ata_eh_qc_complete(qc);
852}
853
752/** 854/**
753 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command 855 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
754 * @qc: Storage for translated ATA taskfile 856 * @qc: Storage for translated ATA taskfile
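A rough sketch of the shape ->eng_timeout handlers take once they report completion through ata_eh_qc_complete(); example_recover() stands in for whatever port reset/cleanup a given driver performs (compare the mv_eng_timeout and pdc_eng_timeout hunks further down):

static void example_eng_timeout(struct ata_port *ap)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);

	example_recover(ap);			/* driver-specific reset/cleanup (placeholder) */

	qc->err_mask |= AC_ERR_TIMEOUT;		/* record why the command failed */
	ata_eh_qc_complete(qc);			/* finish via the EH done queue */
}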
@@ -985,9 +1087,13 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *sc
985 if (dev->flags & ATA_DFLAG_LBA) { 1087 if (dev->flags & ATA_DFLAG_LBA) {
986 tf->flags |= ATA_TFLAG_LBA; 1088 tf->flags |= ATA_TFLAG_LBA;
987 1089
988 if (dev->flags & ATA_DFLAG_LBA48) { 1090 if (lba_28_ok(block, n_block)) {
989 if (n_block > (64 * 1024)) 1091 /* use LBA28 */
990 goto invalid_fld; 1092 tf->command = ATA_CMD_VERIFY;
1093 tf->device |= (block >> 24) & 0xf;
1094 } else if (lba_48_ok(block, n_block)) {
1095 if (!(dev->flags & ATA_DFLAG_LBA48))
1096 goto out_of_range;
991 1097
992 /* use LBA48 */ 1098 /* use LBA48 */
993 tf->flags |= ATA_TFLAG_LBA48; 1099 tf->flags |= ATA_TFLAG_LBA48;
@@ -998,15 +1104,9 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *sc
998 tf->hob_lbah = (block >> 40) & 0xff; 1104 tf->hob_lbah = (block >> 40) & 0xff;
999 tf->hob_lbam = (block >> 32) & 0xff; 1105 tf->hob_lbam = (block >> 32) & 0xff;
1000 tf->hob_lbal = (block >> 24) & 0xff; 1106 tf->hob_lbal = (block >> 24) & 0xff;
1001 } else { 1107 } else
1002 if (n_block > 256) 1108 /* request too large even for LBA48 */
1003 goto invalid_fld; 1109 goto out_of_range;
1004
1005 /* use LBA28 */
1006 tf->command = ATA_CMD_VERIFY;
1007
1008 tf->device |= (block >> 24) & 0xf;
1009 }
1010 1110
1011 tf->nsect = n_block & 0xff; 1111 tf->nsect = n_block & 0xff;
1012 1112
@@ -1019,8 +1119,8 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *sc
1019 /* CHS */ 1119 /* CHS */
1020 u32 sect, head, cyl, track; 1120 u32 sect, head, cyl, track;
1021 1121
1022 if (n_block > 256) 1122 if (!lba_28_ok(block, n_block))
1023 goto invalid_fld; 1123 goto out_of_range;
1024 1124
1025 /* Convert LBA to CHS */ 1125 /* Convert LBA to CHS */
1026 track = (u32)block / dev->sectors; 1126 track = (u32)block / dev->sectors;
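The translation paths above now delegate their range checks to lba_28_ok()/lba_48_ok(). A sketch of what those helpers verify (the real definitions live in <linux/ata.h> and may differ in detail):

static inline int lba_28_ok(u64 block, u32 n_block)
{
	/* last block must fit in 28 bits, transfer limited to 256 sectors */
	return ((block + n_block - 1) < ((u64)1 << 28)) && (n_block <= 256);
}

static inline int lba_48_ok(u64 block, u32 n_block)
{
	/* last block must fit in 48 bits, transfer limited to 65536 sectors */
	return ((block + n_block - 1) < ((u64)1 << 48)) && (n_block <= 65536);
}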
@@ -1139,9 +1239,11 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1139 if (dev->flags & ATA_DFLAG_LBA) { 1239 if (dev->flags & ATA_DFLAG_LBA) {
1140 tf->flags |= ATA_TFLAG_LBA; 1240 tf->flags |= ATA_TFLAG_LBA;
1141 1241
1142 if (dev->flags & ATA_DFLAG_LBA48) { 1242 if (lba_28_ok(block, n_block)) {
1143 /* The request -may- be too large for LBA48. */ 1243 /* use LBA28 */
1144 if ((block >> 48) || (n_block > 65536)) 1244 tf->device |= (block >> 24) & 0xf;
1245 } else if (lba_48_ok(block, n_block)) {
1246 if (!(dev->flags & ATA_DFLAG_LBA48))
1145 goto out_of_range; 1247 goto out_of_range;
1146 1248
1147 /* use LBA48 */ 1249 /* use LBA48 */
@@ -1152,15 +1254,9 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1152 tf->hob_lbah = (block >> 40) & 0xff; 1254 tf->hob_lbah = (block >> 40) & 0xff;
1153 tf->hob_lbam = (block >> 32) & 0xff; 1255 tf->hob_lbam = (block >> 32) & 0xff;
1154 tf->hob_lbal = (block >> 24) & 0xff; 1256 tf->hob_lbal = (block >> 24) & 0xff;
1155 } else { 1257 } else
1156 /* use LBA28 */ 1258 /* request too large even for LBA48 */
1157 1259 goto out_of_range;
1158 /* The request -may- be too large for LBA28. */
1159 if ((block >> 28) || (n_block > 256))
1160 goto out_of_range;
1161
1162 tf->device |= (block >> 24) & 0xf;
1163 }
1164 1260
1165 if (unlikely(ata_rwcmd_protocol(qc) < 0)) 1261 if (unlikely(ata_rwcmd_protocol(qc) < 0))
1166 goto invalid_fld; 1262 goto invalid_fld;
@@ -1178,7 +1274,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1178 u32 sect, head, cyl, track; 1274 u32 sect, head, cyl, track;
1179 1275
1180 /* The request -may- be too large for CHS addressing. */ 1276 /* The request -may- be too large for CHS addressing. */
1181 if ((block >> 28) || (n_block > 256)) 1277 if (!lba_28_ok(block, n_block))
1182 goto out_of_range; 1278 goto out_of_range;
1183 1279
1184 if (unlikely(ata_rwcmd_protocol(qc) < 0)) 1280 if (unlikely(ata_rwcmd_protocol(qc) < 0))
@@ -1225,7 +1321,7 @@ nothing_to_do:
1225 return 1; 1321 return 1;
1226} 1322}
1227 1323
1228static int ata_scsi_qc_complete(struct ata_queued_cmd *qc) 1324static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1229{ 1325{
1230 struct scsi_cmnd *cmd = qc->scsicmd; 1326 struct scsi_cmnd *cmd = qc->scsicmd;
1231 u8 *cdb = cmd->cmnd; 1327 u8 *cdb = cmd->cmnd;
@@ -1262,7 +1358,7 @@ static int ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1262 1358
1263 qc->scsidone(cmd); 1359 qc->scsidone(cmd);
1264 1360
1265 return 0; 1361 ata_qc_free(qc);
1266} 1362}
1267 1363
1268/** 1364/**
@@ -1328,8 +1424,9 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
1328 goto early_finish; 1424 goto early_finish;
1329 1425
1330 /* select device, send command to hardware */ 1426 /* select device, send command to hardware */
1331 if (ata_qc_issue(qc)) 1427 qc->err_mask = ata_qc_issue(qc);
1332 goto err_did; 1428 if (qc->err_mask)
1429 ata_qc_complete(qc);
1333 1430
1334 VPRINTK("EXIT\n"); 1431 VPRINTK("EXIT\n");
1335 return; 1432 return;
@@ -1472,8 +1569,8 @@ unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
1472 1569
1473 if (buflen > 35) { 1570 if (buflen > 35) {
1474 memcpy(&rbuf[8], "ATA ", 8); 1571 memcpy(&rbuf[8], "ATA ", 8);
1475 ata_dev_id_string(args->id, &rbuf[16], ATA_ID_PROD_OFS, 16); 1572 ata_id_string(args->id, &rbuf[16], ATA_ID_PROD_OFS, 16);
1476 ata_dev_id_string(args->id, &rbuf[32], ATA_ID_FW_REV_OFS, 4); 1573 ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV_OFS, 4);
1477 if (rbuf[32] == 0 || rbuf[32] == ' ') 1574 if (rbuf[32] == 0 || rbuf[32] == ' ')
1478 memcpy(&rbuf[32], "n/a ", 4); 1575 memcpy(&rbuf[32], "n/a ", 4);
1479 } 1576 }
@@ -1547,8 +1644,8 @@ unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
1547 memcpy(rbuf, hdr, sizeof(hdr)); 1644 memcpy(rbuf, hdr, sizeof(hdr));
1548 1645
1549 if (buflen > (ATA_SERNO_LEN + 4 - 1)) 1646 if (buflen > (ATA_SERNO_LEN + 4 - 1))
1550 ata_dev_id_string(args->id, (unsigned char *) &rbuf[4], 1647 ata_id_string(args->id, (unsigned char *) &rbuf[4],
1551 ATA_ID_SERNO_OFS, ATA_SERNO_LEN); 1648 ATA_ID_SERNO_OFS, ATA_SERNO_LEN);
1552 1649
1553 return 0; 1650 return 0;
1554} 1651}
@@ -1713,15 +1810,12 @@ static int ata_dev_supports_fua(u16 *id)
1713 if (!ata_id_has_fua(id)) 1810 if (!ata_id_has_fua(id))
1714 return 0; 1811 return 0;
1715 1812
1716 model[40] = '\0'; 1813 ata_id_c_string(id, model, ATA_ID_PROD_OFS, sizeof(model));
1717 fw[8] = '\0'; 1814 ata_id_c_string(id, fw, ATA_ID_FW_REV_OFS, sizeof(fw));
1718
1719 ata_dev_id_string(id, model, ATA_ID_PROD_OFS, sizeof(model) - 1);
1720 ata_dev_id_string(id, fw, ATA_ID_FW_REV_OFS, sizeof(fw) - 1);
1721 1815
1722 if (strncmp(model, "Maxtor", 6)) 1816 if (strcmp(model, "Maxtor"))
1723 return 1; 1817 return 1;
1724 if (strncmp(fw, "BANC1G10", 8)) 1818 if (strcmp(fw, "BANC1G10"))
1725 return 1; 1819 return 1;
1726 1820
1727 return 0; /* blacklisted */ 1821 return 0; /* blacklisted */
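The switch from strncmp() to strcmp() above is safe because ata_id_c_string() yields a trimmed, NUL-terminated string, unlike the raw space-padded output of ata_id_string(). A sketch of the helper's behaviour (the actual implementation sits in libata-core.c and may differ in detail):

void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	ata_id_string(id, s, ofs, len - 1);	/* copy the byte-swapped ID words */

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')		/* strip trailing space padding */
		p--;
	*p = '\0';				/* guarantee NUL termination */
}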
@@ -2015,7 +2109,7 @@ void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8
2015 done(cmd); 2109 done(cmd);
2016} 2110}
2017 2111
2018static int atapi_sense_complete(struct ata_queued_cmd *qc) 2112static void atapi_sense_complete(struct ata_queued_cmd *qc)
2019{ 2113{
2020 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) 2114 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0))
2021 /* FIXME: not quite right; we don't want the 2115 /* FIXME: not quite right; we don't want the
@@ -2026,7 +2120,7 @@ static int atapi_sense_complete(struct ata_queued_cmd *qc)
2026 ata_gen_ata_desc_sense(qc); 2120 ata_gen_ata_desc_sense(qc);
2027 2121
2028 qc->scsidone(qc->scsicmd); 2122 qc->scsidone(qc->scsicmd);
2029 return 0; 2123 ata_qc_free(qc);
2030} 2124}
2031 2125
2032/* is it pointless to prefer PIO for "safety reasons"? */ 2126/* is it pointless to prefer PIO for "safety reasons"? */
@@ -2056,7 +2150,7 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
2056 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer)); 2150 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
2057 qc->dma_dir = DMA_FROM_DEVICE; 2151 qc->dma_dir = DMA_FROM_DEVICE;
2058 2152
2059 memset(&qc->cdb, 0, ap->cdb_len); 2153 memset(&qc->cdb, 0, qc->dev->cdb_len);
2060 qc->cdb[0] = REQUEST_SENSE; 2154 qc->cdb[0] = REQUEST_SENSE;
2061 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE; 2155 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
2062 2156
@@ -2075,15 +2169,14 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
2075 2169
2076 qc->complete_fn = atapi_sense_complete; 2170 qc->complete_fn = atapi_sense_complete;
2077 2171
2078 if (ata_qc_issue(qc)) { 2172 qc->err_mask = ata_qc_issue(qc);
2079 qc->err_mask |= AC_ERR_OTHER; 2173 if (qc->err_mask)
2080 ata_qc_complete(qc); 2174 ata_qc_complete(qc);
2081 }
2082 2175
2083 DPRINTK("EXIT\n"); 2176 DPRINTK("EXIT\n");
2084} 2177}
2085 2178
2086static int atapi_qc_complete(struct ata_queued_cmd *qc) 2179static void atapi_qc_complete(struct ata_queued_cmd *qc)
2087{ 2180{
2088 struct scsi_cmnd *cmd = qc->scsicmd; 2181 struct scsi_cmnd *cmd = qc->scsicmd;
2089 unsigned int err_mask = qc->err_mask; 2182 unsigned int err_mask = qc->err_mask;
@@ -2093,7 +2186,7 @@ static int atapi_qc_complete(struct ata_queued_cmd *qc)
2093 if (unlikely(err_mask & AC_ERR_DEV)) { 2186 if (unlikely(err_mask & AC_ERR_DEV)) {
2094 cmd->result = SAM_STAT_CHECK_CONDITION; 2187 cmd->result = SAM_STAT_CHECK_CONDITION;
2095 atapi_request_sense(qc); 2188 atapi_request_sense(qc);
2096 return 1; 2189 return;
2097 } 2190 }
2098 2191
2099 else if (unlikely(err_mask)) 2192 else if (unlikely(err_mask))
@@ -2133,7 +2226,7 @@ static int atapi_qc_complete(struct ata_queued_cmd *qc)
2133 } 2226 }
2134 2227
2135 qc->scsidone(cmd); 2228 qc->scsidone(cmd);
2136 return 0; 2229 ata_qc_free(qc);
2137} 2230}
2138/** 2231/**
2139 * atapi_xlat - Initialize PACKET taskfile 2232 * atapi_xlat - Initialize PACKET taskfile
@@ -2159,7 +2252,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
2159 if (ata_check_atapi_dma(qc)) 2252 if (ata_check_atapi_dma(qc))
2160 using_pio = 1; 2253 using_pio = 1;
2161 2254
2162 memcpy(&qc->cdb, scsicmd, qc->ap->cdb_len); 2255 memcpy(&qc->cdb, scsicmd, dev->cdb_len);
2163 2256
2164 qc->complete_fn = atapi_qc_complete; 2257 qc->complete_fn = atapi_qc_complete;
2165 2258
@@ -2519,7 +2612,8 @@ out_unlock:
2519 2612
2520/** 2613/**
2521 * ata_scsi_simulate - simulate SCSI command on ATA device 2614 * ata_scsi_simulate - simulate SCSI command on ATA device
2522 * @id: current IDENTIFY data for target device. 2615 * @ap: port the device is connected to
2616 * @dev: the target device
2523 * @cmd: SCSI command being sent to device. 2617 * @cmd: SCSI command being sent to device.
2524 * @done: SCSI command completion function. 2618 * @done: SCSI command completion function.
2525 * 2619 *
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index fddaf479a544..f4c48c91b63d 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -45,8 +45,9 @@ extern int libata_fua;
45extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, 45extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
46 struct ata_device *dev); 46 struct ata_device *dev);
47extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc); 47extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc);
48extern void ata_port_flush_task(struct ata_port *ap);
48extern void ata_qc_free(struct ata_queued_cmd *qc); 49extern void ata_qc_free(struct ata_queued_cmd *qc);
49extern int ata_qc_issue(struct ata_queued_cmd *qc); 50extern unsigned int ata_qc_issue(struct ata_queued_cmd *qc);
50extern int ata_check_atapi_dma(struct ata_queued_cmd *qc); 51extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
51extern void ata_dev_select(struct ata_port *ap, unsigned int device, 52extern void ata_dev_select(struct ata_port *ap, unsigned int device,
52 unsigned int wait, unsigned int can_sleep); 53 unsigned int wait, unsigned int can_sleep);
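The prototype change to ata_qc_issue() above captures the convention used throughout the driver updates that follow: issue paths return an AC_ERR_* mask (0 on success) and the caller completes the qc itself on failure. A minimal sketch of the caller side, mirroring ata_scsi_translate() and atapi_request_sense():

	qc->err_mask = ata_qc_issue(qc);	/* 0 on success, AC_ERR_* bits on failure */
	if (qc->err_mask)
		ata_qc_complete(qc);		/* runs ->complete_fn, which now frees the qc */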
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c
index e8df0c9ec1e6..5f33cc932e70 100644
--- a/drivers/scsi/pdc_adma.c
+++ b/drivers/scsi/pdc_adma.c
@@ -131,7 +131,7 @@ static void adma_host_stop(struct ata_host_set *host_set);
131static void adma_port_stop(struct ata_port *ap); 131static void adma_port_stop(struct ata_port *ap);
132static void adma_phy_reset(struct ata_port *ap); 132static void adma_phy_reset(struct ata_port *ap);
133static void adma_qc_prep(struct ata_queued_cmd *qc); 133static void adma_qc_prep(struct ata_queued_cmd *qc);
134static int adma_qc_issue(struct ata_queued_cmd *qc); 134static unsigned int adma_qc_issue(struct ata_queued_cmd *qc);
135static int adma_check_atapi_dma(struct ata_queued_cmd *qc); 135static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
136static void adma_bmdma_stop(struct ata_queued_cmd *qc); 136static void adma_bmdma_stop(struct ata_queued_cmd *qc);
137static u8 adma_bmdma_status(struct ata_port *ap); 137static u8 adma_bmdma_status(struct ata_port *ap);
@@ -143,11 +143,11 @@ static struct scsi_host_template adma_ata_sht = {
143 .name = DRV_NAME, 143 .name = DRV_NAME,
144 .ioctl = ata_scsi_ioctl, 144 .ioctl = ata_scsi_ioctl,
145 .queuecommand = ata_scsi_queuecmd, 145 .queuecommand = ata_scsi_queuecmd,
146 .eh_timed_out = ata_scsi_timed_out,
146 .eh_strategy_handler = ata_scsi_error, 147 .eh_strategy_handler = ata_scsi_error,
147 .can_queue = ATA_DEF_QUEUE, 148 .can_queue = ATA_DEF_QUEUE,
148 .this_id = ATA_SHT_THIS_ID, 149 .this_id = ATA_SHT_THIS_ID,
149 .sg_tablesize = LIBATA_MAX_PRD, 150 .sg_tablesize = LIBATA_MAX_PRD,
150 .max_sectors = ATA_MAX_SECTORS,
151 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 151 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
152 .emulated = ATA_SHT_EMULATED, 152 .emulated = ATA_SHT_EMULATED,
153 .use_clustering = ENABLE_CLUSTERING, 153 .use_clustering = ENABLE_CLUSTERING,
@@ -419,7 +419,7 @@ static inline void adma_packet_start(struct ata_queued_cmd *qc)
419 writew(aPIOMD4 | aGO, chan + ADMA_CONTROL); 419 writew(aPIOMD4 | aGO, chan + ADMA_CONTROL);
420} 420}
421 421
422static int adma_qc_issue(struct ata_queued_cmd *qc) 422static unsigned int adma_qc_issue(struct ata_queued_cmd *qc)
423{ 423{
424 struct adma_port_priv *pp = qc->ap->private_data; 424 struct adma_port_priv *pp = qc->ap->private_data;
425 425
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index 2770005324b4..e561281967dd 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -37,7 +37,7 @@
37#include <asm/io.h> 37#include <asm/io.h>
38 38
39#define DRV_NAME "sata_mv" 39#define DRV_NAME "sata_mv"
40#define DRV_VERSION "0.5" 40#define DRV_VERSION "0.6"
41 41
42enum { 42enum {
43 /* BAR's are enumerated in terms of pci_resource_start() terms */ 43 /* BAR's are enumerated in terms of pci_resource_start() terms */
@@ -228,7 +228,9 @@ enum {
228 MV_HP_ERRATA_50XXB2 = (1 << 2), 228 MV_HP_ERRATA_50XXB2 = (1 << 2),
229 MV_HP_ERRATA_60X1B2 = (1 << 3), 229 MV_HP_ERRATA_60X1B2 = (1 << 3),
230 MV_HP_ERRATA_60X1C0 = (1 << 4), 230 MV_HP_ERRATA_60X1C0 = (1 << 4),
231 MV_HP_50XX = (1 << 5), 231 MV_HP_ERRATA_XX42A0 = (1 << 5),
232 MV_HP_50XX = (1 << 6),
233 MV_HP_GEN_IIE = (1 << 7),
232 234
233 /* Port private flags (pp_flags) */ 235 /* Port private flags (pp_flags) */
234 MV_PP_FLAG_EDMA_EN = (1 << 0), 236 MV_PP_FLAG_EDMA_EN = (1 << 0),
@@ -237,6 +239,9 @@ enum {
237 239
238#define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX) 240#define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX)
239#define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0) 241#define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0)
242#define IS_GEN_I(hpriv) IS_50XX(hpriv)
243#define IS_GEN_II(hpriv) IS_60XX(hpriv)
244#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
240 245
241enum { 246enum {
242 /* Our DMA boundary is determined by an ePRD being unable to handle 247 /* Our DMA boundary is determined by an ePRD being unable to handle
@@ -255,6 +260,8 @@ enum chip_type {
255 chip_5080, 260 chip_5080,
256 chip_604x, 261 chip_604x,
257 chip_608x, 262 chip_608x,
263 chip_6042,
264 chip_7042,
258}; 265};
259 266
260/* Command ReQuest Block: 32B */ 267/* Command ReQuest Block: 32B */
@@ -265,6 +272,14 @@ struct mv_crqb {
265 u16 ata_cmd[11]; 272 u16 ata_cmd[11];
266}; 273};
267 274
275struct mv_crqb_iie {
276 u32 addr;
277 u32 addr_hi;
278 u32 flags;
279 u32 len;
280 u32 ata_cmd[4];
281};
282
268/* Command ResPonse Block: 8B */ 283/* Command ResPonse Block: 8B */
269struct mv_crpb { 284struct mv_crpb {
270 u16 id; 285 u16 id;
@@ -328,7 +343,8 @@ static void mv_host_stop(struct ata_host_set *host_set);
328static int mv_port_start(struct ata_port *ap); 343static int mv_port_start(struct ata_port *ap);
329static void mv_port_stop(struct ata_port *ap); 344static void mv_port_stop(struct ata_port *ap);
330static void mv_qc_prep(struct ata_queued_cmd *qc); 345static void mv_qc_prep(struct ata_queued_cmd *qc);
331static int mv_qc_issue(struct ata_queued_cmd *qc); 346static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
347static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
332static irqreturn_t mv_interrupt(int irq, void *dev_instance, 348static irqreturn_t mv_interrupt(int irq, void *dev_instance,
333 struct pt_regs *regs); 349 struct pt_regs *regs);
334static void mv_eng_timeout(struct ata_port *ap); 350static void mv_eng_timeout(struct ata_port *ap);
@@ -362,11 +378,11 @@ static struct scsi_host_template mv_sht = {
362 .name = DRV_NAME, 378 .name = DRV_NAME,
363 .ioctl = ata_scsi_ioctl, 379 .ioctl = ata_scsi_ioctl,
364 .queuecommand = ata_scsi_queuecmd, 380 .queuecommand = ata_scsi_queuecmd,
381 .eh_timed_out = ata_scsi_timed_out,
365 .eh_strategy_handler = ata_scsi_error, 382 .eh_strategy_handler = ata_scsi_error,
366 .can_queue = MV_USE_Q_DEPTH, 383 .can_queue = MV_USE_Q_DEPTH,
367 .this_id = ATA_SHT_THIS_ID, 384 .this_id = ATA_SHT_THIS_ID,
368 .sg_tablesize = MV_MAX_SG_CT / 2, 385 .sg_tablesize = MV_MAX_SG_CT / 2,
369 .max_sectors = ATA_MAX_SECTORS,
370 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 386 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
371 .emulated = ATA_SHT_EMULATED, 387 .emulated = ATA_SHT_EMULATED,
372 .use_clustering = ATA_SHT_USE_CLUSTERING, 388 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -430,6 +446,33 @@ static const struct ata_port_operations mv6_ops = {
430 .host_stop = mv_host_stop, 446 .host_stop = mv_host_stop,
431}; 447};
432 448
449static const struct ata_port_operations mv_iie_ops = {
450 .port_disable = ata_port_disable,
451
452 .tf_load = ata_tf_load,
453 .tf_read = ata_tf_read,
454 .check_status = ata_check_status,
455 .exec_command = ata_exec_command,
456 .dev_select = ata_std_dev_select,
457
458 .phy_reset = mv_phy_reset,
459
460 .qc_prep = mv_qc_prep_iie,
461 .qc_issue = mv_qc_issue,
462
463 .eng_timeout = mv_eng_timeout,
464
465 .irq_handler = mv_interrupt,
466 .irq_clear = mv_irq_clear,
467
468 .scr_read = mv_scr_read,
469 .scr_write = mv_scr_write,
470
471 .port_start = mv_port_start,
472 .port_stop = mv_port_stop,
473 .host_stop = mv_host_stop,
474};
475
433static const struct ata_port_info mv_port_info[] = { 476static const struct ata_port_info mv_port_info[] = {
434 { /* chip_504x */ 477 { /* chip_504x */
435 .sht = &mv_sht, 478 .sht = &mv_sht,
@@ -467,6 +510,21 @@ static const struct ata_port_info mv_port_info[] = {
467 .udma_mask = 0x7f, /* udma0-6 */ 510 .udma_mask = 0x7f, /* udma0-6 */
468 .port_ops = &mv6_ops, 511 .port_ops = &mv6_ops,
469 }, 512 },
513 { /* chip_6042 */
514 .sht = &mv_sht,
515 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
516 .pio_mask = 0x1f, /* pio0-4 */
517 .udma_mask = 0x7f, /* udma0-6 */
518 .port_ops = &mv_iie_ops,
519 },
520 { /* chip_7042 */
521 .sht = &mv_sht,
522 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
523 MV_FLAG_DUAL_HC),
524 .pio_mask = 0x1f, /* pio0-4 */
525 .udma_mask = 0x7f, /* udma0-6 */
526 .port_ops = &mv_iie_ops,
527 },
470}; 528};
471 529
472static const struct pci_device_id mv_pci_tbl[] = { 530static const struct pci_device_id mv_pci_tbl[] = {
@@ -477,6 +535,7 @@ static const struct pci_device_id mv_pci_tbl[] = {
477 535
478 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x}, 536 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x},
479 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x}, 537 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x},
538 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6042), 0, 0, chip_6042},
480 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x}, 539 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x},
481 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x}, 540 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x},
482 541
@@ -572,8 +631,8 @@ static void mv_irq_clear(struct ata_port *ap)
572 * @base: port base address 631 * @base: port base address
573 * @pp: port private data 632 * @pp: port private data
574 * 633 *
575 * Verify the local cache of the eDMA state is accurate with an 634 * Verify the local cache of the eDMA state is accurate with a
576 * assert. 635 * WARN_ON.
577 * 636 *
578 * LOCKING: 637 * LOCKING:
579 * Inherited from caller. 638 * Inherited from caller.
@@ -584,15 +643,15 @@ static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
584 writelfl(EDMA_EN, base + EDMA_CMD_OFS); 643 writelfl(EDMA_EN, base + EDMA_CMD_OFS);
585 pp->pp_flags |= MV_PP_FLAG_EDMA_EN; 644 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
586 } 645 }
587 assert(EDMA_EN & readl(base + EDMA_CMD_OFS)); 646 WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
588} 647}
589 648
590/** 649/**
591 * mv_stop_dma - Disable eDMA engine 650 * mv_stop_dma - Disable eDMA engine
592 * @ap: ATA channel to manipulate 651 * @ap: ATA channel to manipulate
593 * 652 *
594 * Verify the local cache of the eDMA state is accurate with an 653 * Verify the local cache of the eDMA state is accurate with a
595 * assert. 654 * WARN_ON.
596 * 655 *
597 * LOCKING: 656 * LOCKING:
598 * Inherited from caller. 657 * Inherited from caller.
@@ -610,7 +669,7 @@ static void mv_stop_dma(struct ata_port *ap)
610 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS); 669 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
611 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 670 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
612 } else { 671 } else {
613 assert(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS))); 672 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
614 } 673 }
615 674
616 /* now properly wait for the eDMA to stop */ 675 /* now properly wait for the eDMA to stop */
@@ -773,6 +832,33 @@ static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
773 dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma); 832 dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma);
774} 833}
775 834
835static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
836{
837 u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
838
839 /* set up non-NCQ EDMA configuration */
840 cfg &= ~0x1f; /* clear queue depth */
841 cfg &= ~EDMA_CFG_NCQ; /* clear NCQ mode */
842 cfg &= ~(1 << 9); /* disable equeue */
843
844 if (IS_GEN_I(hpriv))
845 cfg |= (1 << 8); /* enab config burst size mask */
846
847 else if (IS_GEN_II(hpriv))
848 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
849
850 else if (IS_GEN_IIE(hpriv)) {
851 cfg |= (1 << 23); /* dis RX PM port mask */
852 cfg &= ~(1 << 16); /* dis FIS-based switching (for now) */
853 cfg &= ~(1 << 19); /* dis 128-entry queue (for now?) */
854 cfg |= (1 << 18); /* enab early completion */
855 cfg |= (1 << 17); /* enab host q cache */
856 cfg |= (1 << 22); /* enab cutthrough */
857 }
858
859 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
860}
861
776/** 862/**
777 * mv_port_start - Port specific init/start routine. 863 * mv_port_start - Port specific init/start routine.
778 * @ap: ATA channel to manipulate 864 * @ap: ATA channel to manipulate
@@ -786,6 +872,7 @@ static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
786static int mv_port_start(struct ata_port *ap) 872static int mv_port_start(struct ata_port *ap)
787{ 873{
788 struct device *dev = ap->host_set->dev; 874 struct device *dev = ap->host_set->dev;
875 struct mv_host_priv *hpriv = ap->host_set->private_data;
789 struct mv_port_priv *pp; 876 struct mv_port_priv *pp;
790 void __iomem *port_mmio = mv_ap_base(ap); 877 void __iomem *port_mmio = mv_ap_base(ap);
791 void *mem; 878 void *mem;
@@ -829,17 +916,26 @@ static int mv_port_start(struct ata_port *ap)
829 pp->sg_tbl = mem; 916 pp->sg_tbl = mem;
830 pp->sg_tbl_dma = mem_dma; 917 pp->sg_tbl_dma = mem_dma;
831 918
832 writelfl(EDMA_CFG_Q_DEPTH | EDMA_CFG_RD_BRST_EXT | 919 mv_edma_cfg(hpriv, port_mmio);
833 EDMA_CFG_WR_BUFF_LEN, port_mmio + EDMA_CFG_OFS);
834 920
835 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS); 921 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
836 writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK, 922 writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
837 port_mmio + EDMA_REQ_Q_IN_PTR_OFS); 923 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
838 924
839 writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); 925 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
840 writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS); 926 writelfl(pp->crqb_dma & 0xffffffff,
927 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
928 else
929 writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
841 930
842 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS); 931 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
932
933 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
934 writelfl(pp->crpb_dma & 0xffffffff,
935 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
936 else
937 writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
938
843 writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK, 939 writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
844 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); 940 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
845 941
@@ -960,21 +1056,19 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
960 struct ata_taskfile *tf; 1056 struct ata_taskfile *tf;
961 u16 flags = 0; 1057 u16 flags = 0;
962 1058
963 if (ATA_PROT_DMA != qc->tf.protocol) { 1059 if (ATA_PROT_DMA != qc->tf.protocol)
964 return; 1060 return;
965 }
966 1061
967 /* the req producer index should be the same as we remember it */ 1062 /* the req producer index should be the same as we remember it */
968 assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >> 1063 WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
969 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 1064 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
970 pp->req_producer); 1065 pp->req_producer);
971 1066
972 /* Fill in command request block 1067 /* Fill in command request block
973 */ 1068 */
974 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) { 1069 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
975 flags |= CRQB_FLAG_READ; 1070 flags |= CRQB_FLAG_READ;
976 } 1071 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
977 assert(MV_MAX_Q_DEPTH > qc->tag);
978 flags |= qc->tag << CRQB_TAG_SHIFT; 1072 flags |= qc->tag << CRQB_TAG_SHIFT;
979 1073
980 pp->crqb[pp->req_producer].sg_addr = 1074 pp->crqb[pp->req_producer].sg_addr =
@@ -1029,9 +1123,76 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
1029 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0); 1123 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1030 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */ 1124 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1031 1125
1032 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) { 1126 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1127 return;
1128 mv_fill_sg(qc);
1129}
1130
1131/**
1132 * mv_qc_prep_iie - Host specific command preparation.
1133 * @qc: queued command to prepare
1134 *
1135 * This routine simply redirects to the general purpose routine
1136 * if command is not DMA. Else, it handles prep of the CRQB
1137 * (command request block), does some sanity checking, and calls
1138 * the SG load routine.
1139 *
1140 * LOCKING:
1141 * Inherited from caller.
1142 */
1143static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1144{
1145 struct ata_port *ap = qc->ap;
1146 struct mv_port_priv *pp = ap->private_data;
1147 struct mv_crqb_iie *crqb;
1148 struct ata_taskfile *tf;
1149 u32 flags = 0;
1150
1151 if (ATA_PROT_DMA != qc->tf.protocol)
1152 return;
1153
1154 /* the req producer index should be the same as we remember it */
1155 WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
1156 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1157 pp->req_producer);
1158
1159 /* Fill in Gen IIE command request block
1160 */
1161 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1162 flags |= CRQB_FLAG_READ;
1163
1164 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1165 flags |= qc->tag << CRQB_TAG_SHIFT;
1166
1167 crqb = (struct mv_crqb_iie *) &pp->crqb[pp->req_producer];
1168 crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1169 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1170 crqb->flags = cpu_to_le32(flags);
1171
1172 tf = &qc->tf;
1173 crqb->ata_cmd[0] = cpu_to_le32(
1174 (tf->command << 16) |
1175 (tf->feature << 24)
1176 );
1177 crqb->ata_cmd[1] = cpu_to_le32(
1178 (tf->lbal << 0) |
1179 (tf->lbam << 8) |
1180 (tf->lbah << 16) |
1181 (tf->device << 24)
1182 );
1183 crqb->ata_cmd[2] = cpu_to_le32(
1184 (tf->hob_lbal << 0) |
1185 (tf->hob_lbam << 8) |
1186 (tf->hob_lbah << 16) |
1187 (tf->hob_feature << 24)
1188 );
1189 crqb->ata_cmd[3] = cpu_to_le32(
1190 (tf->nsect << 0) |
1191 (tf->hob_nsect << 8)
1192 );
1193
1194 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1033 return; 1195 return;
1034 }
1035 mv_fill_sg(qc); 1196 mv_fill_sg(qc);
1036} 1197}
1037 1198
@@ -1047,7 +1208,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
1047 * LOCKING: 1208 * LOCKING:
1048 * Inherited from caller. 1209 * Inherited from caller.
1049 */ 1210 */
1050static int mv_qc_issue(struct ata_queued_cmd *qc) 1211static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1051{ 1212{
1052 void __iomem *port_mmio = mv_ap_base(qc->ap); 1213 void __iomem *port_mmio = mv_ap_base(qc->ap);
1053 struct mv_port_priv *pp = qc->ap->private_data; 1214 struct mv_port_priv *pp = qc->ap->private_data;
@@ -1065,12 +1226,12 @@ static int mv_qc_issue(struct ata_queued_cmd *qc)
1065 in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS); 1226 in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1066 1227
1067 /* the req producer index should be the same as we remember it */ 1228 /* the req producer index should be the same as we remember it */
1068 assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 1229 WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1069 pp->req_producer); 1230 pp->req_producer);
1070 /* until we do queuing, the queue should be empty at this point */ 1231 /* until we do queuing, the queue should be empty at this point */
1071 assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 1232 WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1072 ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >> 1233 ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >>
1073 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); 1234 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1074 1235
1075 mv_inc_q_index(&pp->req_producer); /* now incr producer index */ 1236 mv_inc_q_index(&pp->req_producer); /* now incr producer index */
1076 1237
@@ -1090,7 +1251,7 @@ static int mv_qc_issue(struct ata_queued_cmd *qc)
1090 * 1251 *
1091 * This routine is for use when the port is in DMA mode, when it 1252 * This routine is for use when the port is in DMA mode, when it
1092 * will be using the CRPB (command response block) method of 1253 * will be using the CRPB (command response block) method of
1093 * returning command completion information. We assert indices 1254 * returning command completion information. We check indices
1094 * are good, grab status, and bump the response consumer index to 1255 * are good, grab status, and bump the response consumer index to
1095 * prove that we're up to date. 1256 * prove that we're up to date.
1096 * 1257 *
@@ -1106,16 +1267,16 @@ static u8 mv_get_crpb_status(struct ata_port *ap)
1106 out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); 1267 out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1107 1268
1108 /* the response consumer index should be the same as we remember it */ 1269 /* the response consumer index should be the same as we remember it */
1109 assert(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 1270 WARN_ON(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1110 pp->rsp_consumer); 1271 pp->rsp_consumer);
1111 1272
1112 /* increment our consumer index... */ 1273 /* increment our consumer index... */
1113 pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer); 1274 pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer);
1114 1275
1115 /* and, until we do NCQ, there should only be 1 CRPB waiting */ 1276 /* and, until we do NCQ, there should only be 1 CRPB waiting */
1116 assert(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >> 1277 WARN_ON(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >>
1117 EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 1278 EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1118 pp->rsp_consumer); 1279 pp->rsp_consumer);
1119 1280
1120 /* write out our inc'd consumer index so EDMA knows we're caught up */ 1281 /* write out our inc'd consumer index so EDMA knows we're caught up */
1121 out_ptr &= EDMA_RSP_Q_BASE_LO_MASK; 1282 out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
@@ -1192,7 +1353,6 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
1192 u32 hc_irq_cause; 1353 u32 hc_irq_cause;
1193 int shift, port, port0, hard_port, handled; 1354 int shift, port, port0, hard_port, handled;
1194 unsigned int err_mask; 1355 unsigned int err_mask;
1195 u8 ata_status = 0;
1196 1356
1197 if (hc == 0) { 1357 if (hc == 0) {
1198 port0 = 0; 1358 port0 = 0;
@@ -1210,6 +1370,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
1210 hc,relevant,hc_irq_cause); 1370 hc,relevant,hc_irq_cause);
1211 1371
1212 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) { 1372 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
1373 u8 ata_status = 0;
1213 ap = host_set->ports[port]; 1374 ap = host_set->ports[port];
1214 hard_port = port & MV_PORT_MASK; /* range 0-3 */ 1375 hard_port = port & MV_PORT_MASK; /* range 0-3 */
1215 handled = 0; /* ensure ata_status is set if handled++ */ 1376 handled = 0; /* ensure ata_status is set if handled++ */
@@ -1681,6 +1842,12 @@ static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1681 m2 |= hpriv->signal[port].pre; 1842 m2 |= hpriv->signal[port].pre;
1682 m2 &= ~(1 << 16); 1843 m2 &= ~(1 << 16);
1683 1844
1845 /* according to mvSata 3.6.1, some IIE values are fixed */
1846 if (IS_GEN_IIE(hpriv)) {
1847 m2 &= ~0xC30FF01F;
1848 m2 |= 0x0000900F;
1849 }
1850
1684 writel(m2, port_mmio + PHY_MODE2); 1851 writel(m2, port_mmio + PHY_MODE2);
1685} 1852}
1686 1853
@@ -1846,7 +2013,6 @@ static void mv_phy_reset(struct ata_port *ap)
1846static void mv_eng_timeout(struct ata_port *ap) 2013static void mv_eng_timeout(struct ata_port *ap)
1847{ 2014{
1848 struct ata_queued_cmd *qc; 2015 struct ata_queued_cmd *qc;
1849 unsigned long flags;
1850 2016
1851 printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id); 2017 printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id);
1852 DPRINTK("All regs @ start of eng_timeout\n"); 2018 DPRINTK("All regs @ start of eng_timeout\n");
@@ -1861,22 +2027,8 @@ static void mv_eng_timeout(struct ata_port *ap)
1861 mv_err_intr(ap); 2027 mv_err_intr(ap);
1862 mv_stop_and_reset(ap); 2028 mv_stop_and_reset(ap);
1863 2029
1864 if (!qc) { 2030 qc->err_mask |= AC_ERR_TIMEOUT;
1865 printk(KERN_ERR "ata%u: BUG: timeout without command\n", 2031 ata_eh_qc_complete(qc);
1866 ap->id);
1867 } else {
1868 /* hack alert! We cannot use the supplied completion
1869 * function from inside the ->eh_strategy_handler() thread.
1870 * libata is the only user of ->eh_strategy_handler() in
1871 * any kernel, so the default scsi_done() assumes it is
1872 * not being called from the SCSI EH.
1873 */
1874 spin_lock_irqsave(&ap->host_set->lock, flags);
1875 qc->scsidone = scsi_finish_command;
1876 qc->err_mask |= AC_ERR_OTHER;
1877 ata_qc_complete(qc);
1878 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1879 }
1880} 2032}
1881 2033
1882/** 2034/**
@@ -1995,6 +2147,27 @@ static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv,
1995 } 2147 }
1996 break; 2148 break;
1997 2149
2150 case chip_7042:
2151 case chip_6042:
2152 hpriv->ops = &mv6xxx_ops;
2153
2154 hp_flags |= MV_HP_GEN_IIE;
2155
2156 switch (rev_id) {
2157 case 0x0:
2158 hp_flags |= MV_HP_ERRATA_XX42A0;
2159 break;
2160 case 0x1:
2161 hp_flags |= MV_HP_ERRATA_60X1C0;
2162 break;
2163 default:
2164 dev_printk(KERN_WARNING, &pdev->dev,
2165 "Applying 60X1C0 workarounds to unknown rev\n");
2166 hp_flags |= MV_HP_ERRATA_60X1C0;
2167 break;
2168 }
2169 break;
2170
1998 default: 2171 default:
1999 printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx); 2172 printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
2000 return 1; 2173 return 1;
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index bbbb55eeb73a..caffadc2e0ae 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -229,11 +229,11 @@ static struct scsi_host_template nv_sht = {
229 .name = DRV_NAME, 229 .name = DRV_NAME,
230 .ioctl = ata_scsi_ioctl, 230 .ioctl = ata_scsi_ioctl,
231 .queuecommand = ata_scsi_queuecmd, 231 .queuecommand = ata_scsi_queuecmd,
232 .eh_timed_out = ata_scsi_timed_out,
232 .eh_strategy_handler = ata_scsi_error, 233 .eh_strategy_handler = ata_scsi_error,
233 .can_queue = ATA_DEF_QUEUE, 234 .can_queue = ATA_DEF_QUEUE,
234 .this_id = ATA_SHT_THIS_ID, 235 .this_id = ATA_SHT_THIS_ID,
235 .sg_tablesize = LIBATA_MAX_PRD, 236 .sg_tablesize = LIBATA_MAX_PRD,
236 .max_sectors = ATA_MAX_SECTORS,
237 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 237 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
238 .emulated = ATA_SHT_EMULATED, 238 .emulated = ATA_SHT_EMULATED,
239 .use_clustering = ATA_SHT_USE_CLUSTERING, 239 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index b0b0a69b3563..84cb3940ad88 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -46,7 +46,7 @@
46#include "sata_promise.h" 46#include "sata_promise.h"
47 47
48#define DRV_NAME "sata_promise" 48#define DRV_NAME "sata_promise"
49#define DRV_VERSION "1.03" 49#define DRV_VERSION "1.04"
50 50
51 51
52enum { 52enum {
@@ -58,6 +58,7 @@ enum {
58 PDC_GLOBAL_CTL = 0x48, /* Global control/status (per port) */ 58 PDC_GLOBAL_CTL = 0x48, /* Global control/status (per port) */
59 PDC_CTLSTAT = 0x60, /* IDE control and status (per port) */ 59 PDC_CTLSTAT = 0x60, /* IDE control and status (per port) */
60 PDC_SATA_PLUG_CSR = 0x6C, /* SATA Plug control/status reg */ 60 PDC_SATA_PLUG_CSR = 0x6C, /* SATA Plug control/status reg */
61 PDC2_SATA_PLUG_CSR = 0x60, /* SATAII Plug control/status reg */
61 PDC_SLEW_CTL = 0x470, /* slew rate control reg */ 62 PDC_SLEW_CTL = 0x470, /* slew rate control reg */
62 63
63 PDC_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) | 64 PDC_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) |
@@ -67,8 +68,10 @@ enum {
67 board_20319 = 1, /* FastTrak S150 TX4 */ 68 board_20319 = 1, /* FastTrak S150 TX4 */
68 board_20619 = 2, /* FastTrak TX4000 */ 69 board_20619 = 2, /* FastTrak TX4000 */
69 board_20771 = 3, /* FastTrak TX2300 */ 70 board_20771 = 3, /* FastTrak TX2300 */
71 board_2057x = 4, /* SATAII150 Tx2plus */
72 board_40518 = 5, /* SATAII150 Tx4 */
70 73
71 PDC_HAS_PATA = (1 << 1), /* PDC20375 has PATA */ 74 PDC_HAS_PATA = (1 << 1), /* PDC20375/20575 has PATA */
72 75
73 PDC_RESET = (1 << 11), /* HDMA reset */ 76 PDC_RESET = (1 << 11), /* HDMA reset */
74 77
@@ -82,6 +85,10 @@ struct pdc_port_priv {
82 dma_addr_t pkt_dma; 85 dma_addr_t pkt_dma;
83}; 86};
84 87
88struct pdc_host_priv {
89 int hotplug_offset;
90};
91
85static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg); 92static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg);
86static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); 93static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
87static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); 94static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
@@ -95,7 +102,8 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc);
95static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf); 102static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
96static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf); 103static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
97static void pdc_irq_clear(struct ata_port *ap); 104static void pdc_irq_clear(struct ata_port *ap);
98static int pdc_qc_issue_prot(struct ata_queued_cmd *qc); 105static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc);
106static void pdc_host_stop(struct ata_host_set *host_set);
99 107
100 108
101static struct scsi_host_template pdc_ata_sht = { 109static struct scsi_host_template pdc_ata_sht = {
@@ -103,11 +111,11 @@ static struct scsi_host_template pdc_ata_sht = {
103 .name = DRV_NAME, 111 .name = DRV_NAME,
104 .ioctl = ata_scsi_ioctl, 112 .ioctl = ata_scsi_ioctl,
105 .queuecommand = ata_scsi_queuecmd, 113 .queuecommand = ata_scsi_queuecmd,
114 .eh_timed_out = ata_scsi_timed_out,
106 .eh_strategy_handler = ata_scsi_error, 115 .eh_strategy_handler = ata_scsi_error,
107 .can_queue = ATA_DEF_QUEUE, 116 .can_queue = ATA_DEF_QUEUE,
108 .this_id = ATA_SHT_THIS_ID, 117 .this_id = ATA_SHT_THIS_ID,
109 .sg_tablesize = LIBATA_MAX_PRD, 118 .sg_tablesize = LIBATA_MAX_PRD,
110 .max_sectors = ATA_MAX_SECTORS,
111 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 119 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
112 .emulated = ATA_SHT_EMULATED, 120 .emulated = ATA_SHT_EMULATED,
113 .use_clustering = ATA_SHT_USE_CLUSTERING, 121 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -137,7 +145,7 @@ static const struct ata_port_operations pdc_sata_ops = {
137 .scr_write = pdc_sata_scr_write, 145 .scr_write = pdc_sata_scr_write,
138 .port_start = pdc_port_start, 146 .port_start = pdc_port_start,
139 .port_stop = pdc_port_stop, 147 .port_stop = pdc_port_stop,
140 .host_stop = ata_pci_host_stop, 148 .host_stop = pdc_host_stop,
141}; 149};
142 150
143static const struct ata_port_operations pdc_pata_ops = { 151static const struct ata_port_operations pdc_pata_ops = {
@@ -158,7 +166,7 @@ static const struct ata_port_operations pdc_pata_ops = {
158 166
159 .port_start = pdc_port_start, 167 .port_start = pdc_port_start,
160 .port_stop = pdc_port_stop, 168 .port_stop = pdc_port_stop,
161 .host_stop = ata_pci_host_stop, 169 .host_stop = pdc_host_stop,
162}; 170};
163 171
164static const struct ata_port_info pdc_port_info[] = { 172static const struct ata_port_info pdc_port_info[] = {
@@ -201,6 +209,26 @@ static const struct ata_port_info pdc_port_info[] = {
201 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 209 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
202 .port_ops = &pdc_sata_ops, 210 .port_ops = &pdc_sata_ops,
203 }, 211 },
212
213 /* board_2057x */
214 {
215 .sht = &pdc_ata_sht,
216 .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
217 .pio_mask = 0x1f, /* pio0-4 */
218 .mwdma_mask = 0x07, /* mwdma0-2 */
219 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
220 .port_ops = &pdc_sata_ops,
221 },
222
223 /* board_40518 */
224 {
225 .sht = &pdc_ata_sht,
226 .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
227 .pio_mask = 0x1f, /* pio0-4 */
228 .mwdma_mask = 0x07, /* mwdma0-2 */
229 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
230 .port_ops = &pdc_sata_ops,
231 },
204}; 232};
205 233
206static const struct pci_device_id pdc_ata_pci_tbl[] = { 234static const struct pci_device_id pdc_ata_pci_tbl[] = {
@@ -217,9 +245,9 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = {
217 { PCI_VENDOR_ID_PROMISE, 0x3376, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 245 { PCI_VENDOR_ID_PROMISE, 0x3376, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
218 board_2037x }, 246 board_2037x },
219 { PCI_VENDOR_ID_PROMISE, 0x3574, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 247 { PCI_VENDOR_ID_PROMISE, 0x3574, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
220 board_2037x }, 248 board_2057x },
221 { PCI_VENDOR_ID_PROMISE, 0x3d75, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 249 { PCI_VENDOR_ID_PROMISE, 0x3d75, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
222 board_2037x }, 250 board_2057x },
223 { PCI_VENDOR_ID_PROMISE, 0x3d73, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 251 { PCI_VENDOR_ID_PROMISE, 0x3d73, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
224 board_2037x }, 252 board_2037x },
225 253
@@ -227,12 +255,14 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = {
227 board_20319 }, 255 board_20319 },
228 { PCI_VENDOR_ID_PROMISE, 0x3319, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 256 { PCI_VENDOR_ID_PROMISE, 0x3319, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
229 board_20319 }, 257 board_20319 },
258 { PCI_VENDOR_ID_PROMISE, 0x3515, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
259 board_20319 },
230 { PCI_VENDOR_ID_PROMISE, 0x3519, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 260 { PCI_VENDOR_ID_PROMISE, 0x3519, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
231 board_20319 }, 261 board_20319 },
232 { PCI_VENDOR_ID_PROMISE, 0x3d17, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 262 { PCI_VENDOR_ID_PROMISE, 0x3d17, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
233 board_20319 }, 263 board_20319 },
234 { PCI_VENDOR_ID_PROMISE, 0x3d18, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 264 { PCI_VENDOR_ID_PROMISE, 0x3d18, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
235 board_20319 }, 265 board_40518 },
236 266
237 { PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 267 { PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
238 board_20619 }, 268 board_20619 },
@@ -261,12 +291,11 @@ static int pdc_port_start(struct ata_port *ap)
261 if (rc) 291 if (rc)
262 return rc; 292 return rc;
263 293
264 pp = kmalloc(sizeof(*pp), GFP_KERNEL); 294 pp = kzalloc(sizeof(*pp), GFP_KERNEL);
265 if (!pp) { 295 if (!pp) {
266 rc = -ENOMEM; 296 rc = -ENOMEM;
267 goto err_out; 297 goto err_out;
268 } 298 }
269 memset(pp, 0, sizeof(*pp));
270 299
271 pp->pkt = dma_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL); 300 pp->pkt = dma_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
272 if (!pp->pkt) { 301 if (!pp->pkt) {
@@ -298,6 +327,16 @@ static void pdc_port_stop(struct ata_port *ap)
298} 327}
299 328
300 329
330static void pdc_host_stop(struct ata_host_set *host_set)
331{
332 struct pdc_host_priv *hp = host_set->private_data;
333
334 ata_pci_host_stop(host_set);
335
336 kfree(hp);
337}
338
339
301static void pdc_reset_port(struct ata_port *ap) 340static void pdc_reset_port(struct ata_port *ap)
302{ 341{
303 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_CTLSTAT; 342 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_CTLSTAT;
@@ -394,19 +433,6 @@ static void pdc_eng_timeout(struct ata_port *ap)
394 spin_lock_irqsave(&host_set->lock, flags); 433 spin_lock_irqsave(&host_set->lock, flags);
395 434
396 qc = ata_qc_from_tag(ap, ap->active_tag); 435 qc = ata_qc_from_tag(ap, ap->active_tag);
397 if (!qc) {
398 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
399 ap->id);
400 goto out;
401 }
402
403 /* hack alert! We cannot use the supplied completion
404 * function from inside the ->eh_strategy_handler() thread.
405 * libata is the only user of ->eh_strategy_handler() in
406 * any kernel, so the default scsi_done() assumes it is
407 * not being called from the SCSI EH.
408 */
409 qc->scsidone = scsi_finish_command;
410 436
411 switch (qc->tf.protocol) { 437 switch (qc->tf.protocol) {
412 case ATA_PROT_DMA: 438 case ATA_PROT_DMA:
@@ -414,7 +440,6 @@ static void pdc_eng_timeout(struct ata_port *ap)
414 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 440 printk(KERN_ERR "ata%u: command timeout\n", ap->id);
415 drv_stat = ata_wait_idle(ap); 441 drv_stat = ata_wait_idle(ap);
416 qc->err_mask |= __ac_err_mask(drv_stat); 442 qc->err_mask |= __ac_err_mask(drv_stat);
417 ata_qc_complete(qc);
418 break; 443 break;
419 444
420 default: 445 default:
@@ -424,12 +449,11 @@ static void pdc_eng_timeout(struct ata_port *ap)
424 ap->id, qc->tf.command, drv_stat); 449 ap->id, qc->tf.command, drv_stat);
425 450
426 qc->err_mask |= ac_err_mask(drv_stat); 451 qc->err_mask |= ac_err_mask(drv_stat);
427 ata_qc_complete(qc);
428 break; 452 break;
429 } 453 }
430 454
431out:
432 spin_unlock_irqrestore(&host_set->lock, flags); 455 spin_unlock_irqrestore(&host_set->lock, flags);
456 ata_eh_qc_complete(qc);
433 DPRINTK("EXIT\n"); 457 DPRINTK("EXIT\n");
434} 458}
435 459
@@ -495,14 +519,15 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
495 VPRINTK("QUICK EXIT 2\n"); 519 VPRINTK("QUICK EXIT 2\n");
496 return IRQ_NONE; 520 return IRQ_NONE;
497 } 521 }
522
523 spin_lock(&host_set->lock);
524
498 mask &= 0xffff; /* only 16 tags possible */ 525 mask &= 0xffff; /* only 16 tags possible */
499 if (!mask) { 526 if (!mask) {
500 VPRINTK("QUICK EXIT 3\n"); 527 VPRINTK("QUICK EXIT 3\n");
501 return IRQ_NONE; 528 goto done_irq;
502 } 529 }
503 530
504 spin_lock(&host_set->lock);
505
506 writel(mask, mmio_base + PDC_INT_SEQMASK); 531 writel(mask, mmio_base + PDC_INT_SEQMASK);
507 532
508 for (i = 0; i < host_set->n_ports; i++) { 533 for (i = 0; i < host_set->n_ports; i++) {
@@ -519,10 +544,10 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
519 } 544 }
520 } 545 }
521 546
522 spin_unlock(&host_set->lock);
523
524 VPRINTK("EXIT\n"); 547 VPRINTK("EXIT\n");
525 548
549done_irq:
550 spin_unlock(&host_set->lock);
526 return IRQ_RETVAL(handled); 551 return IRQ_RETVAL(handled);
527} 552}
528 553
@@ -544,7 +569,7 @@ static inline void pdc_packet_start(struct ata_queued_cmd *qc)
544 readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */ 569 readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */
545} 570}
546 571
547static int pdc_qc_issue_prot(struct ata_queued_cmd *qc) 572static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc)
548{ 573{
549 switch (qc->tf.protocol) { 574 switch (qc->tf.protocol) {
550 case ATA_PROT_DMA: 575 case ATA_PROT_DMA:
@@ -600,6 +625,8 @@ static void pdc_ata_setup_port(struct ata_ioports *port, unsigned long base)
600static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe) 625static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
601{ 626{
602 void __iomem *mmio = pe->mmio_base; 627 void __iomem *mmio = pe->mmio_base;
628 struct pdc_host_priv *hp = pe->private_data;
629 int hotplug_offset = hp->hotplug_offset;
603 u32 tmp; 630 u32 tmp;
604 631
605 /* 632 /*
@@ -614,12 +641,12 @@ static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
614 writel(tmp, mmio + PDC_FLASH_CTL); 641 writel(tmp, mmio + PDC_FLASH_CTL);
615 642
616 /* clear plug/unplug flags for all ports */ 643 /* clear plug/unplug flags for all ports */
617 tmp = readl(mmio + PDC_SATA_PLUG_CSR); 644 tmp = readl(mmio + hotplug_offset);
618 writel(tmp | 0xff, mmio + PDC_SATA_PLUG_CSR); 645 writel(tmp | 0xff, mmio + hotplug_offset);
619 646
620 /* mask plug/unplug ints */ 647 /* mask plug/unplug ints */
621 tmp = readl(mmio + PDC_SATA_PLUG_CSR); 648 tmp = readl(mmio + hotplug_offset);
622 writel(tmp | 0xff0000, mmio + PDC_SATA_PLUG_CSR); 649 writel(tmp | 0xff0000, mmio + hotplug_offset);
623 650
624 /* reduce TBG clock to 133 MHz. */ 651 /* reduce TBG clock to 133 MHz. */
625 tmp = readl(mmio + PDC_TBG_MODE); 652 tmp = readl(mmio + PDC_TBG_MODE);
@@ -641,6 +668,7 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
641{ 668{
642 static int printed_version; 669 static int printed_version;
643 struct ata_probe_ent *probe_ent = NULL; 670 struct ata_probe_ent *probe_ent = NULL;
671 struct pdc_host_priv *hp;
644 unsigned long base; 672 unsigned long base;
645 void __iomem *mmio_base; 673 void __iomem *mmio_base;
646 unsigned int board_idx = (unsigned int) ent->driver_data; 674 unsigned int board_idx = (unsigned int) ent->driver_data;
@@ -671,13 +699,12 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
671 if (rc) 699 if (rc)
672 goto err_out_regions; 700 goto err_out_regions;
673 701
674 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); 702 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
675 if (probe_ent == NULL) { 703 if (probe_ent == NULL) {
676 rc = -ENOMEM; 704 rc = -ENOMEM;
677 goto err_out_regions; 705 goto err_out_regions;
678 } 706 }
679 707
680 memset(probe_ent, 0, sizeof(*probe_ent));
681 probe_ent->dev = pci_dev_to_dev(pdev); 708 probe_ent->dev = pci_dev_to_dev(pdev);
682 INIT_LIST_HEAD(&probe_ent->node); 709 INIT_LIST_HEAD(&probe_ent->node);
683 710
@@ -688,6 +715,16 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
688 } 715 }
689 base = (unsigned long) mmio_base; 716 base = (unsigned long) mmio_base;
690 717
718 hp = kzalloc(sizeof(*hp), GFP_KERNEL);
719 if (hp == NULL) {
720 rc = -ENOMEM;
721 goto err_out_free_ent;
722 }
723
724 /* Set default hotplug offset */
725 hp->hotplug_offset = PDC_SATA_PLUG_CSR;
726 probe_ent->private_data = hp;
727
691 probe_ent->sht = pdc_port_info[board_idx].sht; 728 probe_ent->sht = pdc_port_info[board_idx].sht;
692 probe_ent->host_flags = pdc_port_info[board_idx].host_flags; 729 probe_ent->host_flags = pdc_port_info[board_idx].host_flags;
693 probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask; 730 probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask;
@@ -707,6 +744,10 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
707 744
708 /* notice 4-port boards */ 745 /* notice 4-port boards */
709 switch (board_idx) { 746 switch (board_idx) {
747 case board_40518:
748 /* Override hotplug offset for SATAII150 */
749 hp->hotplug_offset = PDC2_SATA_PLUG_CSR;
750 /* Fall through */
710 case board_20319: 751 case board_20319:
711 probe_ent->n_ports = 4; 752 probe_ent->n_ports = 4;
712 753
@@ -716,6 +757,10 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
716 probe_ent->port[2].scr_addr = base + 0x600; 757 probe_ent->port[2].scr_addr = base + 0x600;
717 probe_ent->port[3].scr_addr = base + 0x700; 758 probe_ent->port[3].scr_addr = base + 0x700;
718 break; 759 break;
760 case board_2057x:
761 /* Override hotplug offset for SATAII150 */
762 hp->hotplug_offset = PDC2_SATA_PLUG_CSR;
763 /* Fall through */
719 case board_2037x: 764 case board_2037x:
720 probe_ent->n_ports = 2; 765 probe_ent->n_ports = 2;
721 break; 766 break;
@@ -741,8 +786,10 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
741 /* initialize adapter */ 786 /* initialize adapter */
742 pdc_host_init(board_idx, probe_ent); 787 pdc_host_init(board_idx, probe_ent);
743 788
744 /* FIXME: check ata_device_add return value */ 789 /* FIXME: Need any other frees than hp? */
745 ata_device_add(probe_ent); 790 if (!ata_device_add(probe_ent))
791 kfree(hp);
792
746 kfree(probe_ent); 793 kfree(probe_ent);
747 794
748 return 0; 795 return 0;
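
The per-host private data introduced above carries one hotplug register offset: it defaults to PDC_SATA_PLUG_CSR and is overridden for the SATAII150 parts (board_40518, board_2057x) before falling through to the shared port setup, and pdc_host_init() later reads it instead of a hard-coded constant. A small stand-alone sketch of that default-then-override pattern follows; the board and register names mirror the patch, but the enum values and offsets are placeholders, not taken from the driver headers:

#include <stdio.h>

enum { board_2037x, board_2057x, board_20319, board_40518, board_20619 };

/* Placeholder offsets; the real values live in the driver's register defines. */
enum { PDC_SATA_PLUG_CSR = 0x24, PDC2_SATA_PLUG_CSR = 0x60 };

struct host_priv { unsigned int hotplug_offset; };

static void pick_hotplug_offset(struct host_priv *hp, int board_idx, int *n_ports)
{
	hp->hotplug_offset = PDC_SATA_PLUG_CSR;           /* default */

	switch (board_idx) {
	case board_40518:
		hp->hotplug_offset = PDC2_SATA_PLUG_CSR;  /* SATAII150 override */
		/* fall through */
	case board_20319:
		*n_ports = 4;
		break;
	case board_2057x:
		hp->hotplug_offset = PDC2_SATA_PLUG_CSR;  /* SATAII150 override */
		/* fall through */
	case board_2037x:
	default:
		*n_ports = 2;
		break;
	}
}

int main(void)
{
	struct host_priv hp;
	int ports;

	pick_hotplug_offset(&hp, board_40518, &ports);
	printf("offset=0x%x ports=%d\n", hp.hotplug_offset, ports);
	return 0;
}
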
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index 80480f0fb2b8..9602f43a298e 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -120,7 +120,7 @@ static void qs_host_stop(struct ata_host_set *host_set);
120static void qs_port_stop(struct ata_port *ap); 120static void qs_port_stop(struct ata_port *ap);
121static void qs_phy_reset(struct ata_port *ap); 121static void qs_phy_reset(struct ata_port *ap);
122static void qs_qc_prep(struct ata_queued_cmd *qc); 122static void qs_qc_prep(struct ata_queued_cmd *qc);
123static int qs_qc_issue(struct ata_queued_cmd *qc); 123static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
124static int qs_check_atapi_dma(struct ata_queued_cmd *qc); 124static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
125static void qs_bmdma_stop(struct ata_queued_cmd *qc); 125static void qs_bmdma_stop(struct ata_queued_cmd *qc);
126static u8 qs_bmdma_status(struct ata_port *ap); 126static u8 qs_bmdma_status(struct ata_port *ap);
@@ -132,11 +132,11 @@ static struct scsi_host_template qs_ata_sht = {
132 .name = DRV_NAME, 132 .name = DRV_NAME,
133 .ioctl = ata_scsi_ioctl, 133 .ioctl = ata_scsi_ioctl,
134 .queuecommand = ata_scsi_queuecmd, 134 .queuecommand = ata_scsi_queuecmd,
135 .eh_timed_out = ata_scsi_timed_out,
135 .eh_strategy_handler = ata_scsi_error, 136 .eh_strategy_handler = ata_scsi_error,
136 .can_queue = ATA_DEF_QUEUE, 137 .can_queue = ATA_DEF_QUEUE,
137 .this_id = ATA_SHT_THIS_ID, 138 .this_id = ATA_SHT_THIS_ID,
138 .sg_tablesize = QS_MAX_PRD, 139 .sg_tablesize = QS_MAX_PRD,
139 .max_sectors = ATA_MAX_SECTORS,
140 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 140 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
141 .emulated = ATA_SHT_EMULATED, 141 .emulated = ATA_SHT_EMULATED,
142 //FIXME .use_clustering = ATA_SHT_USE_CLUSTERING, 142 //FIXME .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -276,8 +276,8 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
276 unsigned int nelem; 276 unsigned int nelem;
277 u8 *prd = pp->pkt + QS_CPB_BYTES; 277 u8 *prd = pp->pkt + QS_CPB_BYTES;
278 278
279 assert(qc->__sg != NULL); 279 WARN_ON(qc->__sg == NULL);
280 assert(qc->n_elem > 0 || qc->pad_len > 0); 280 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
281 281
282 nelem = 0; 282 nelem = 0;
283 ata_for_each_sg(sg, qc) { 283 ata_for_each_sg(sg, qc) {
@@ -352,7 +352,7 @@ static inline void qs_packet_start(struct ata_queued_cmd *qc)
352 readl(chan + QS_CCT_CFF); /* flush */ 352 readl(chan + QS_CCT_CFF); /* flush */
353} 353}
354 354
355static int qs_qc_issue(struct ata_queued_cmd *qc) 355static unsigned int qs_qc_issue(struct ata_queued_cmd *qc)
356{ 356{
357 struct qs_port_priv *pp = qc->ap->private_data; 357 struct qs_port_priv *pp = qc->ap->private_data;
358 358
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
index 9face3c6aa21..4f2a67ed39d8 100644
--- a/drivers/scsi/sata_sil.c
+++ b/drivers/scsi/sata_sil.c
@@ -49,24 +49,30 @@
49#define DRV_VERSION "0.9" 49#define DRV_VERSION "0.9"
50 50
51enum { 51enum {
52 /*
53 * host flags
54 */
52 SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29), 55 SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
53 SIL_FLAG_MOD15WRITE = (1 << 30), 56 SIL_FLAG_MOD15WRITE = (1 << 30),
57 SIL_DFL_HOST_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
58 ATA_FLAG_MMIO,
54 59
60 /*
61 * Controller IDs
62 */
55 sil_3112 = 0, 63 sil_3112 = 0,
56 sil_3112_m15w = 1, 64 sil_3512 = 1,
57 sil_3512 = 2, 65 sil_3114 = 2,
58 sil_3114 = 3,
59
60 SIL_FIFO_R0 = 0x40,
61 SIL_FIFO_W0 = 0x41,
62 SIL_FIFO_R1 = 0x44,
63 SIL_FIFO_W1 = 0x45,
64 SIL_FIFO_R2 = 0x240,
65 SIL_FIFO_W2 = 0x241,
66 SIL_FIFO_R3 = 0x244,
67 SIL_FIFO_W3 = 0x245,
68 66
67 /*
68 * Register offsets
69 */
69 SIL_SYSCFG = 0x48, 70 SIL_SYSCFG = 0x48,
71
72 /*
73 * Register bits
74 */
75 /* SYSCFG */
70 SIL_MASK_IDE0_INT = (1 << 22), 76 SIL_MASK_IDE0_INT = (1 << 22),
71 SIL_MASK_IDE1_INT = (1 << 23), 77 SIL_MASK_IDE1_INT = (1 << 23),
72 SIL_MASK_IDE2_INT = (1 << 24), 78 SIL_MASK_IDE2_INT = (1 << 24),
@@ -75,9 +81,12 @@ enum {
75 SIL_MASK_4PORT = SIL_MASK_2PORT | 81 SIL_MASK_4PORT = SIL_MASK_2PORT |
76 SIL_MASK_IDE2_INT | SIL_MASK_IDE3_INT, 82 SIL_MASK_IDE2_INT | SIL_MASK_IDE3_INT,
77 83
78 SIL_IDE2_BMDMA = 0x200, 84 /* BMDMA/BMDMA2 */
79
80 SIL_INTR_STEERING = (1 << 1), 85 SIL_INTR_STEERING = (1 << 1),
86
87 /*
88 * Others
89 */
81 SIL_QUIRK_MOD15WRITE = (1 << 0), 90 SIL_QUIRK_MOD15WRITE = (1 << 0),
82 SIL_QUIRK_UDMA5MAX = (1 << 1), 91 SIL_QUIRK_UDMA5MAX = (1 << 1),
83}; 92};
@@ -90,13 +99,13 @@ static void sil_post_set_mode (struct ata_port *ap);
90 99
91 100
92static const struct pci_device_id sil_pci_tbl[] = { 101static const struct pci_device_id sil_pci_tbl[] = {
93 { 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w }, 102 { 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
94 { 0x1095, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w }, 103 { 0x1095, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
95 { 0x1095, 0x3512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3512 }, 104 { 0x1095, 0x3512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3512 },
96 { 0x1095, 0x3114, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3114 }, 105 { 0x1095, 0x3114, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3114 },
97 { 0x1002, 0x436e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w }, 106 { 0x1002, 0x436e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
98 { 0x1002, 0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w }, 107 { 0x1002, 0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
99 { 0x1002, 0x437a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w }, 108 { 0x1002, 0x437a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
100 { } /* terminate list */ 109 { } /* terminate list */
101}; 110};
102 111
@@ -137,11 +146,11 @@ static struct scsi_host_template sil_sht = {
137 .name = DRV_NAME, 146 .name = DRV_NAME,
138 .ioctl = ata_scsi_ioctl, 147 .ioctl = ata_scsi_ioctl,
139 .queuecommand = ata_scsi_queuecmd, 148 .queuecommand = ata_scsi_queuecmd,
149 .eh_timed_out = ata_scsi_timed_out,
140 .eh_strategy_handler = ata_scsi_error, 150 .eh_strategy_handler = ata_scsi_error,
141 .can_queue = ATA_DEF_QUEUE, 151 .can_queue = ATA_DEF_QUEUE,
142 .this_id = ATA_SHT_THIS_ID, 152 .this_id = ATA_SHT_THIS_ID,
143 .sg_tablesize = LIBATA_MAX_PRD, 153 .sg_tablesize = LIBATA_MAX_PRD,
144 .max_sectors = ATA_MAX_SECTORS,
145 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 154 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
146 .emulated = ATA_SHT_EMULATED, 155 .emulated = ATA_SHT_EMULATED,
147 .use_clustering = ATA_SHT_USE_CLUSTERING, 156 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -159,7 +168,7 @@ static const struct ata_port_operations sil_ops = {
159 .check_status = ata_check_status, 168 .check_status = ata_check_status,
160 .exec_command = ata_exec_command, 169 .exec_command = ata_exec_command,
161 .dev_select = ata_std_dev_select, 170 .dev_select = ata_std_dev_select,
162 .phy_reset = sata_phy_reset, 171 .probe_reset = ata_std_probe_reset,
163 .post_set_mode = sil_post_set_mode, 172 .post_set_mode = sil_post_set_mode,
164 .bmdma_setup = ata_bmdma_setup, 173 .bmdma_setup = ata_bmdma_setup,
165 .bmdma_start = ata_bmdma_start, 174 .bmdma_start = ata_bmdma_start,
@@ -181,19 +190,7 @@ static const struct ata_port_info sil_port_info[] = {
181 /* sil_3112 */ 190 /* sil_3112 */
182 { 191 {
183 .sht = &sil_sht, 192 .sht = &sil_sht,
184 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 193 .host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_MOD15WRITE,
185 ATA_FLAG_SRST | ATA_FLAG_MMIO,
186 .pio_mask = 0x1f, /* pio0-4 */
187 .mwdma_mask = 0x07, /* mwdma0-2 */
188 .udma_mask = 0x3f, /* udma0-5 */
189 .port_ops = &sil_ops,
190 },
191 /* sil_3112_15w - keep it sync'd w/ sil_3112 */
192 {
193 .sht = &sil_sht,
194 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
195 ATA_FLAG_SRST | ATA_FLAG_MMIO |
196 SIL_FLAG_MOD15WRITE,
197 .pio_mask = 0x1f, /* pio0-4 */ 194 .pio_mask = 0x1f, /* pio0-4 */
198 .mwdma_mask = 0x07, /* mwdma0-2 */ 195 .mwdma_mask = 0x07, /* mwdma0-2 */
199 .udma_mask = 0x3f, /* udma0-5 */ 196 .udma_mask = 0x3f, /* udma0-5 */
@@ -202,9 +199,7 @@ static const struct ata_port_info sil_port_info[] = {
202 /* sil_3512 */ 199 /* sil_3512 */
203 { 200 {
204 .sht = &sil_sht, 201 .sht = &sil_sht,
205 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 202 .host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
206 ATA_FLAG_SRST | ATA_FLAG_MMIO |
207 SIL_FLAG_RERR_ON_DMA_ACT,
208 .pio_mask = 0x1f, /* pio0-4 */ 203 .pio_mask = 0x1f, /* pio0-4 */
209 .mwdma_mask = 0x07, /* mwdma0-2 */ 204 .mwdma_mask = 0x07, /* mwdma0-2 */
210 .udma_mask = 0x3f, /* udma0-5 */ 205 .udma_mask = 0x3f, /* udma0-5 */
@@ -213,9 +208,7 @@ static const struct ata_port_info sil_port_info[] = {
213 /* sil_3114 */ 208 /* sil_3114 */
214 { 209 {
215 .sht = &sil_sht, 210 .sht = &sil_sht,
216 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 211 .host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
217 ATA_FLAG_SRST | ATA_FLAG_MMIO |
218 SIL_FLAG_RERR_ON_DMA_ACT,
219 .pio_mask = 0x1f, /* pio0-4 */ 212 .pio_mask = 0x1f, /* pio0-4 */
220 .mwdma_mask = 0x07, /* mwdma0-2 */ 213 .mwdma_mask = 0x07, /* mwdma0-2 */
221 .udma_mask = 0x3f, /* udma0-5 */ 214 .udma_mask = 0x3f, /* udma0-5 */
@@ -229,16 +222,17 @@ static const struct {
229 unsigned long tf; /* ATA taskfile register block */ 222 unsigned long tf; /* ATA taskfile register block */
230 unsigned long ctl; /* ATA control/altstatus register block */ 223 unsigned long ctl; /* ATA control/altstatus register block */
231 unsigned long bmdma; /* DMA register block */ 224 unsigned long bmdma; /* DMA register block */
225 unsigned long fifo_cfg; /* FIFO Valid Byte Count and Control */
232 unsigned long scr; /* SATA control register block */ 226 unsigned long scr; /* SATA control register block */
233 unsigned long sien; /* SATA Interrupt Enable register */ 227 unsigned long sien; /* SATA Interrupt Enable register */
234 unsigned long xfer_mode;/* data transfer mode register */ 228 unsigned long xfer_mode;/* data transfer mode register */
235 unsigned long sfis_cfg; /* SATA FIS reception config register */ 229 unsigned long sfis_cfg; /* SATA FIS reception config register */
236} sil_port[] = { 230} sil_port[] = {
237 /* port 0 ... */ 231 /* port 0 ... */
238 { 0x80, 0x8A, 0x00, 0x100, 0x148, 0xb4, 0x14c }, 232 { 0x80, 0x8A, 0x00, 0x40, 0x100, 0x148, 0xb4, 0x14c },
239 { 0xC0, 0xCA, 0x08, 0x180, 0x1c8, 0xf4, 0x1cc }, 233 { 0xC0, 0xCA, 0x08, 0x44, 0x180, 0x1c8, 0xf4, 0x1cc },
240 { 0x280, 0x28A, 0x200, 0x300, 0x348, 0x2b4, 0x34c }, 234 { 0x280, 0x28A, 0x200, 0x240, 0x300, 0x348, 0x2b4, 0x34c },
241 { 0x2C0, 0x2CA, 0x208, 0x380, 0x3c8, 0x2f4, 0x3cc }, 235 { 0x2C0, 0x2CA, 0x208, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc },
242 /* ... port 3 */ 236 /* ... port 3 */
243}; 237};
244 238
@@ -354,22 +348,12 @@ static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
354static void sil_dev_config(struct ata_port *ap, struct ata_device *dev) 348static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
355{ 349{
356 unsigned int n, quirks = 0; 350 unsigned int n, quirks = 0;
357 unsigned char model_num[40]; 351 unsigned char model_num[41];
358 const char *s;
359 unsigned int len;
360 352
361 ata_dev_id_string(dev->id, model_num, ATA_ID_PROD_OFS, 353 ata_id_c_string(dev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num));
362 sizeof(model_num));
363 s = &model_num[0];
364 len = strnlen(s, sizeof(model_num));
365
366 /* ATAPI specifies that empty space is blank-filled; remove blanks */
367 while ((len > 0) && (s[len - 1] == ' '))
368 len--;
369 354
370 for (n = 0; sil_blacklist[n].product; n++) 355 for (n = 0; sil_blacklist[n].product; n++)
371 if (!memcmp(sil_blacklist[n].product, s, 356 if (!strcmp(sil_blacklist[n].product, model_num)) {
372 strlen(sil_blacklist[n].product))) {
373 quirks = sil_blacklist[n].quirk; 357 quirks = sil_blacklist[n].quirk;
374 break; 358 break;
375 } 359 }
@@ -380,16 +364,14 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
380 (quirks & SIL_QUIRK_MOD15WRITE))) { 364 (quirks & SIL_QUIRK_MOD15WRITE))) {
381 printk(KERN_INFO "ata%u(%u): applying Seagate errata fix (mod15write workaround)\n", 365 printk(KERN_INFO "ata%u(%u): applying Seagate errata fix (mod15write workaround)\n",
382 ap->id, dev->devno); 366 ap->id, dev->devno);
383 ap->host->max_sectors = 15; 367 dev->max_sectors = 15;
384 ap->host->hostt->max_sectors = 15;
385 dev->flags |= ATA_DFLAG_LOCK_SECTORS;
386 return; 368 return;
387 } 369 }
388 370
389 /* limit to udma5 */ 371 /* limit to udma5 */
390 if (quirks & SIL_QUIRK_UDMA5MAX) { 372 if (quirks & SIL_QUIRK_UDMA5MAX) {
391 printk(KERN_INFO "ata%u(%u): applying Maxtor errata fix %s\n", 373 printk(KERN_INFO "ata%u(%u): applying Maxtor errata fix %s\n",
392 ap->id, dev->devno, s); 374 ap->id, dev->devno, model_num);
393 ap->udma_mask &= ATA_UDMA5; 375 ap->udma_mask &= ATA_UDMA5;
394 return; 376 return;
395 } 377 }
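
With ata_id_c_string() producing a NUL-terminated, blank-trimmed model string, the sil_dev_config() hunks above collapse the blacklist lookup to a plain strcmp() over a table, and the quirk then only narrows the device or UDMA limits. A stand-alone sketch of that lookup (the table entries here are examples, not the driver's full list):

#include <stdio.h>
#include <string.h>

enum { QUIRK_MOD15WRITE = 1 << 0, QUIRK_UDMA5MAX = 1 << 1 };

static const struct {
	const char *product;
	unsigned int quirk;
} blacklist[] = {
	{ "ST320012AS",     QUIRK_MOD15WRITE },   /* example entries only */
	{ "Maxtor 4D060H3", QUIRK_UDMA5MAX },
	{ NULL, 0 }                               /* terminator */
};

static unsigned int lookup_quirks(const char *model_num)
{
	for (int n = 0; blacklist[n].product; n++)
		if (!strcmp(blacklist[n].product, model_num))
			return blacklist[n].quirk;
	return 0;
}

int main(void)
{
	printf("quirks=0x%x\n", lookup_quirks("ST320012AS"));
	printf("quirks=0x%x\n", lookup_quirks("SomeOtherDisk"));
	return 0;
}
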
@@ -431,13 +413,12 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
431 if (rc) 413 if (rc)
432 goto err_out_regions; 414 goto err_out_regions;
433 415
434 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); 416 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
435 if (probe_ent == NULL) { 417 if (probe_ent == NULL) {
436 rc = -ENOMEM; 418 rc = -ENOMEM;
437 goto err_out_regions; 419 goto err_out_regions;
438 } 420 }
439 421
440 memset(probe_ent, 0, sizeof(*probe_ent));
441 INIT_LIST_HEAD(&probe_ent->node); 422 INIT_LIST_HEAD(&probe_ent->node);
442 probe_ent->dev = pci_dev_to_dev(pdev); 423 probe_ent->dev = pci_dev_to_dev(pdev);
443 probe_ent->port_ops = sil_port_info[ent->driver_data].port_ops; 424 probe_ent->port_ops = sil_port_info[ent->driver_data].port_ops;
@@ -474,19 +455,12 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
474 if (cls) { 455 if (cls) {
475 cls >>= 3; 456 cls >>= 3;
476 cls++; /* cls = (line_size/8)+1 */ 457 cls++; /* cls = (line_size/8)+1 */
477 writeb(cls, mmio_base + SIL_FIFO_R0); 458 for (i = 0; i < probe_ent->n_ports; i++)
478 writeb(cls, mmio_base + SIL_FIFO_W0); 459 writew(cls << 8 | cls,
479 writeb(cls, mmio_base + SIL_FIFO_R1); 460 mmio_base + sil_port[i].fifo_cfg);
480 writeb(cls, mmio_base + SIL_FIFO_W1);
481 if (ent->driver_data == sil_3114) {
482 writeb(cls, mmio_base + SIL_FIFO_R2);
483 writeb(cls, mmio_base + SIL_FIFO_W2);
484 writeb(cls, mmio_base + SIL_FIFO_R3);
485 writeb(cls, mmio_base + SIL_FIFO_W3);
486 }
487 } else 461 } else
488 dev_printk(KERN_WARNING, &pdev->dev, 462 dev_printk(KERN_WARNING, &pdev->dev,
489 "cache line size not set. Driver may not function\n"); 463 "cache line size not set. Driver may not function\n");
490 464
491 /* Apply R_ERR on DMA activate FIS errata workaround */ 465 /* Apply R_ERR on DMA activate FIS errata workaround */
492 if (probe_ent->host_flags & SIL_FLAG_RERR_ON_DMA_ACT) { 466 if (probe_ent->host_flags & SIL_FLAG_RERR_ON_DMA_ACT) {
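
The cache-line-size hunk above replaces eight hard-coded writeb() pairs with one loop over the per-port table, now that each sil_port[] entry carries a fifo_cfg offset; a single 16-bit write programs both FIFO depth bytes at once. A small stand-alone model of that table-driven write, with the MMIO access simulated by an array (offsets copied from the table in the patch, everything else illustrative):

#include <stdio.h>
#include <stdint.h>

static uint8_t mmio[0x400];                       /* simulated register space */

static const struct { unsigned long fifo_cfg; } port_tbl[] = {
	{ 0x40 }, { 0x44 }, { 0x240 }, { 0x244 }, /* offsets as listed in the patch */
};

static void writew_sim(uint16_t val, unsigned long off)
{
	mmio[off]     = val & 0xff;               /* low byte */
	mmio[off + 1] = val >> 8;                 /* high byte (same value here) */
}

int main(void)
{
	unsigned int cls = 8;                     /* (cache line size / 8) + 1, say */

	for (int i = 0; i < 4; i++)               /* n_ports would come from the probe entry */
		writew_sim(cls << 8 | cls, port_tbl[i].fifo_cfg);

	printf("port0 FIFO bytes = %u/%u\n",
	       (unsigned)mmio[0x40], (unsigned)mmio[0x41]);
	return 0;
}
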
@@ -509,10 +483,10 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
509 irq_mask = SIL_MASK_4PORT; 483 irq_mask = SIL_MASK_4PORT;
510 484
511 /* flip the magic "make 4 ports work" bit */ 485 /* flip the magic "make 4 ports work" bit */
512 tmp = readl(mmio_base + SIL_IDE2_BMDMA); 486 tmp = readl(mmio_base + sil_port[2].bmdma);
513 if ((tmp & SIL_INTR_STEERING) == 0) 487 if ((tmp & SIL_INTR_STEERING) == 0)
514 writel(tmp | SIL_INTR_STEERING, 488 writel(tmp | SIL_INTR_STEERING,
515 mmio_base + SIL_IDE2_BMDMA); 489 mmio_base + sil_port[2].bmdma);
516 490
517 } else { 491 } else {
518 irq_mask = SIL_MASK_2PORT; 492 irq_mask = SIL_MASK_2PORT;
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
index 923130185a9e..9a53a5ed38c5 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/scsi/sata_sil24.c
@@ -249,9 +249,9 @@ static u8 sil24_check_status(struct ata_port *ap);
249static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg); 249static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg);
250static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val); 250static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val);
251static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf); 251static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
252static void sil24_phy_reset(struct ata_port *ap); 252static int sil24_probe_reset(struct ata_port *ap, unsigned int *classes);
253static void sil24_qc_prep(struct ata_queued_cmd *qc); 253static void sil24_qc_prep(struct ata_queued_cmd *qc);
254static int sil24_qc_issue(struct ata_queued_cmd *qc); 254static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
255static void sil24_irq_clear(struct ata_port *ap); 255static void sil24_irq_clear(struct ata_port *ap);
256static void sil24_eng_timeout(struct ata_port *ap); 256static void sil24_eng_timeout(struct ata_port *ap);
257static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs); 257static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
@@ -262,6 +262,7 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
262 262
263static const struct pci_device_id sil24_pci_tbl[] = { 263static const struct pci_device_id sil24_pci_tbl[] = {
264 { 0x1095, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 }, 264 { 0x1095, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 },
265 { 0x8086, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 },
265 { 0x1095, 0x3132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3132 }, 266 { 0x1095, 0x3132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3132 },
266 { 0x1095, 0x3131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 }, 267 { 0x1095, 0x3131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 },
267 { 0x1095, 0x3531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 }, 268 { 0x1095, 0x3531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 },
@@ -280,11 +281,11 @@ static struct scsi_host_template sil24_sht = {
280 .name = DRV_NAME, 281 .name = DRV_NAME,
281 .ioctl = ata_scsi_ioctl, 282 .ioctl = ata_scsi_ioctl,
282 .queuecommand = ata_scsi_queuecmd, 283 .queuecommand = ata_scsi_queuecmd,
284 .eh_timed_out = ata_scsi_timed_out,
283 .eh_strategy_handler = ata_scsi_error, 285 .eh_strategy_handler = ata_scsi_error,
284 .can_queue = ATA_DEF_QUEUE, 286 .can_queue = ATA_DEF_QUEUE,
285 .this_id = ATA_SHT_THIS_ID, 287 .this_id = ATA_SHT_THIS_ID,
286 .sg_tablesize = LIBATA_MAX_PRD, 288 .sg_tablesize = LIBATA_MAX_PRD,
287 .max_sectors = ATA_MAX_SECTORS,
288 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 289 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
289 .emulated = ATA_SHT_EMULATED, 290 .emulated = ATA_SHT_EMULATED,
290 .use_clustering = ATA_SHT_USE_CLUSTERING, 291 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -305,7 +306,7 @@ static const struct ata_port_operations sil24_ops = {
305 306
306 .tf_read = sil24_tf_read, 307 .tf_read = sil24_tf_read,
307 308
308 .phy_reset = sil24_phy_reset, 309 .probe_reset = sil24_probe_reset,
309 310
310 .qc_prep = sil24_qc_prep, 311 .qc_prep = sil24_qc_prep,
311 .qc_issue = sil24_qc_issue, 312 .qc_issue = sil24_qc_issue,
@@ -335,8 +336,8 @@ static struct ata_port_info sil24_port_info[] = {
335 { 336 {
336 .sht = &sil24_sht, 337 .sht = &sil24_sht,
337 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 338 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
338 ATA_FLAG_SRST | ATA_FLAG_MMIO | 339 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
339 ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(4), 340 SIL24_NPORTS2FLAG(4),
340 .pio_mask = 0x1f, /* pio0-4 */ 341 .pio_mask = 0x1f, /* pio0-4 */
341 .mwdma_mask = 0x07, /* mwdma0-2 */ 342 .mwdma_mask = 0x07, /* mwdma0-2 */
342 .udma_mask = 0x3f, /* udma0-5 */ 343 .udma_mask = 0x3f, /* udma0-5 */
@@ -346,8 +347,8 @@ static struct ata_port_info sil24_port_info[] = {
346 { 347 {
347 .sht = &sil24_sht, 348 .sht = &sil24_sht,
348 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 349 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
349 ATA_FLAG_SRST | ATA_FLAG_MMIO | 350 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
350 ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(2), 351 SIL24_NPORTS2FLAG(2),
351 .pio_mask = 0x1f, /* pio0-4 */ 352 .pio_mask = 0x1f, /* pio0-4 */
352 .mwdma_mask = 0x07, /* mwdma0-2 */ 353 .mwdma_mask = 0x07, /* mwdma0-2 */
353 .udma_mask = 0x3f, /* udma0-5 */ 354 .udma_mask = 0x3f, /* udma0-5 */
@@ -357,8 +358,8 @@ static struct ata_port_info sil24_port_info[] = {
357 { 358 {
358 .sht = &sil24_sht, 359 .sht = &sil24_sht,
359 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 360 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
360 ATA_FLAG_SRST | ATA_FLAG_MMIO | 361 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
361 ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(1), 362 SIL24_NPORTS2FLAG(1),
362 .pio_mask = 0x1f, /* pio0-4 */ 363 .pio_mask = 0x1f, /* pio0-4 */
363 .mwdma_mask = 0x07, /* mwdma0-2 */ 364 .mwdma_mask = 0x07, /* mwdma0-2 */
364 .udma_mask = 0x3f, /* udma0-5 */ 365 .udma_mask = 0x3f, /* udma0-5 */
@@ -370,7 +371,7 @@ static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev)
370{ 371{
371 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 372 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
372 373
373 if (ap->cdb_len == 16) 374 if (dev->cdb_len == 16)
374 writel(PORT_CS_CDB16, port + PORT_CTRL_STAT); 375 writel(PORT_CS_CDB16, port + PORT_CTRL_STAT);
375 else 376 else
376 writel(PORT_CS_CDB16, port + PORT_CTRL_CLR); 377 writel(PORT_CS_CDB16, port + PORT_CTRL_CLR);
@@ -427,14 +428,23 @@ static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
427 *tf = pp->tf; 428 *tf = pp->tf;
428} 429}
429 430
430static int sil24_issue_SRST(struct ata_port *ap) 431static int sil24_softreset(struct ata_port *ap, int verbose,
432 unsigned int *class)
431{ 433{
432 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 434 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
433 struct sil24_port_priv *pp = ap->private_data; 435 struct sil24_port_priv *pp = ap->private_data;
434 struct sil24_prb *prb = &pp->cmd_block[0].ata.prb; 436 struct sil24_prb *prb = &pp->cmd_block[0].ata.prb;
435 dma_addr_t paddr = pp->cmd_block_dma; 437 dma_addr_t paddr = pp->cmd_block_dma;
438 unsigned long timeout = jiffies + ATA_TMOUT_BOOT * HZ;
436 u32 irq_enable, irq_stat; 439 u32 irq_enable, irq_stat;
437 int cnt; 440
441 DPRINTK("ENTER\n");
442
443 if (!sata_dev_present(ap)) {
444 DPRINTK("PHY reports no device\n");
445 *class = ATA_DEV_NONE;
446 goto out;
447 }
438 448
439 /* temporarily turn off IRQs during SRST */ 449 /* temporarily turn off IRQs during SRST */
440 irq_enable = readl(port + PORT_IRQ_ENABLE_SET); 450 irq_enable = readl(port + PORT_IRQ_ENABLE_SET);
@@ -451,7 +461,7 @@ static int sil24_issue_SRST(struct ata_port *ap)
451 461
452 writel((u32)paddr, port + PORT_CMD_ACTIVATE); 462 writel((u32)paddr, port + PORT_CMD_ACTIVATE);
453 463
454 for (cnt = 0; cnt < 100; cnt++) { 464 do {
455 irq_stat = readl(port + PORT_IRQ_STAT); 465 irq_stat = readl(port + PORT_IRQ_STAT);
456 writel(irq_stat, port + PORT_IRQ_STAT); /* clear irq */ 466 writel(irq_stat, port + PORT_IRQ_STAT); /* clear irq */
457 467
@@ -459,36 +469,42 @@ static int sil24_issue_SRST(struct ata_port *ap)
459 if (irq_stat & (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR)) 469 if (irq_stat & (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR))
460 break; 470 break;
461 471
462 msleep(1); 472 msleep(100);
463 } 473 } while (time_before(jiffies, timeout));
464 474
465 /* restore IRQs */ 475 /* restore IRQs */
466 writel(irq_enable, port + PORT_IRQ_ENABLE_SET); 476 writel(irq_enable, port + PORT_IRQ_ENABLE_SET);
467 477
468 if (!(irq_stat & PORT_IRQ_COMPLETE)) 478 if (!(irq_stat & PORT_IRQ_COMPLETE)) {
469 return -1; 479 DPRINTK("EXIT, srst failed\n");
480 return -EIO;
481 }
470 482
471 /* update TF */
472 sil24_update_tf(ap); 483 sil24_update_tf(ap);
484 *class = ata_dev_classify(&pp->tf);
485
486 if (*class == ATA_DEV_UNKNOWN)
487 *class = ATA_DEV_NONE;
488
489 out:
490 DPRINTK("EXIT, class=%u\n", *class);
473 return 0; 491 return 0;
474} 492}
475 493
476static void sil24_phy_reset(struct ata_port *ap) 494static int sil24_hardreset(struct ata_port *ap, int verbose,
495 unsigned int *class)
477{ 496{
478 struct sil24_port_priv *pp = ap->private_data; 497 unsigned int dummy_class;
479 498
480 __sata_phy_reset(ap); 499 /* sil24 doesn't report device signature after hard reset */
481 if (ap->flags & ATA_FLAG_PORT_DISABLED) 500 return sata_std_hardreset(ap, verbose, &dummy_class);
482 return; 501}
483
484 if (sil24_issue_SRST(ap) < 0) {
485 printk(KERN_ERR DRV_NAME
486 " ata%u: SRST failed, disabling port\n", ap->id);
487 ap->ops->port_disable(ap);
488 return;
489 }
490 502
491 ap->device->class = ata_dev_classify(&pp->tf); 503static int sil24_probe_reset(struct ata_port *ap, unsigned int *classes)
504{
505 return ata_drive_probe_reset(ap, ata_std_probeinit,
506 sil24_softreset, sil24_hardreset,
507 ata_std_postreset, classes);
492} 508}
493 509
494static inline void sil24_fill_sg(struct ata_queued_cmd *qc, 510static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
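
sil24_probe_reset() above simply delegates to ata_drive_probe_reset(), handing it the probe-init, soft-reset, hard-reset and post-reset callbacks. The following is a simplified stand-alone model of that kind of composition, try the soft reset first, fall back to the hard reset, then run post-reset, offered only as an illustration of the intent; the signatures and control flow are stand-ins, not the libata helper's actual implementation:

#include <stdio.h>

struct port;    /* opaque stand-in for struct ata_port */

typedef void (*init_fn)(struct port *p);
typedef int  (*reset_fn)(struct port *p, int verbose, unsigned int *classes);
typedef void (*post_fn)(struct port *p, unsigned int *classes);

static int drive_probe_reset(struct port *p, init_fn probeinit,
			     reset_fn softreset, reset_fn hardreset,
			     post_fn postreset, unsigned int *classes)
{
	int rc = -1;

	if (probeinit)
		probeinit(p);

	if (softreset)
		rc = softreset(p, 0, classes);
	if (rc && hardreset)                      /* fall back to hard reset on failure */
		rc = hardreset(p, 0, classes);
	if (rc)
		return rc;

	if (postreset)
		postreset(p, classes);
	return 0;
}

static int soft_ok(struct port *p, int verbose, unsigned int *classes)
{
	(void)p; (void)verbose;
	*classes = 1;                             /* pretend an ATA device answered */
	return 0;
}

static void report(struct port *p, unsigned int *classes)
{
	(void)p;
	printf("device class after reset: %u\n", *classes);
}

int main(void)
{
	unsigned int classes = 0;
	return drive_probe_reset(NULL, NULL, soft_ok, NULL, report, &classes);
}
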
@@ -533,7 +549,7 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
533 prb = &cb->atapi.prb; 549 prb = &cb->atapi.prb;
534 sge = cb->atapi.sge; 550 sge = cb->atapi.sge;
535 memset(cb->atapi.cdb, 0, 32); 551 memset(cb->atapi.cdb, 0, 32);
536 memcpy(cb->atapi.cdb, qc->cdb, ap->cdb_len); 552 memcpy(cb->atapi.cdb, qc->cdb, qc->dev->cdb_len);
537 553
538 if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) { 554 if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) {
539 if (qc->tf.flags & ATA_TFLAG_WRITE) 555 if (qc->tf.flags & ATA_TFLAG_WRITE)
@@ -557,7 +573,7 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
557 sil24_fill_sg(qc, sge); 573 sil24_fill_sg(qc, sge);
558} 574}
559 575
560static int sil24_qc_issue(struct ata_queued_cmd *qc) 576static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
561{ 577{
562 struct ata_port *ap = qc->ap; 578 struct ata_port *ap = qc->ap;
563 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 579 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
@@ -638,23 +654,10 @@ static void sil24_eng_timeout(struct ata_port *ap)
638 struct ata_queued_cmd *qc; 654 struct ata_queued_cmd *qc;
639 655
640 qc = ata_qc_from_tag(ap, ap->active_tag); 656 qc = ata_qc_from_tag(ap, ap->active_tag);
641 if (!qc) {
642 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
643 ap->id);
644 return;
645 }
646 657
647 /*
648 * hack alert! We cannot use the supplied completion
649 * function from inside the ->eh_strategy_handler() thread.
650 * libata is the only user of ->eh_strategy_handler() in
651 * any kernel, so the default scsi_done() assumes it is
652 * not being called from the SCSI EH.
653 */
654 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 658 printk(KERN_ERR "ata%u: command timeout\n", ap->id);
655 qc->scsidone = scsi_finish_command; 659 qc->err_mask |= AC_ERR_TIMEOUT;
656 qc->err_mask |= AC_ERR_OTHER; 660 ata_eh_qc_complete(qc);
657 ata_qc_complete(qc);
658 661
659 sil24_reset_controller(ap); 662 sil24_reset_controller(ap);
660} 663}
@@ -895,6 +898,7 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
895 probe_ent->sht = pinfo->sht; 898 probe_ent->sht = pinfo->sht;
896 probe_ent->host_flags = pinfo->host_flags; 899 probe_ent->host_flags = pinfo->host_flags;
897 probe_ent->pio_mask = pinfo->pio_mask; 900 probe_ent->pio_mask = pinfo->pio_mask;
901 probe_ent->mwdma_mask = pinfo->mwdma_mask;
898 probe_ent->udma_mask = pinfo->udma_mask; 902 probe_ent->udma_mask = pinfo->udma_mask;
899 probe_ent->port_ops = pinfo->port_ops; 903 probe_ent->port_ops = pinfo->port_ops;
900 probe_ent->n_ports = SIL24_FLAG2NPORTS(pinfo->host_flags); 904 probe_ent->n_ports = SIL24_FLAG2NPORTS(pinfo->host_flags);
diff --git a/drivers/scsi/sata_sis.c b/drivers/scsi/sata_sis.c
index 2df8c5632ac3..7fd45f86de99 100644
--- a/drivers/scsi/sata_sis.c
+++ b/drivers/scsi/sata_sis.c
@@ -87,11 +87,11 @@ static struct scsi_host_template sis_sht = {
87 .name = DRV_NAME, 87 .name = DRV_NAME,
88 .ioctl = ata_scsi_ioctl, 88 .ioctl = ata_scsi_ioctl,
89 .queuecommand = ata_scsi_queuecmd, 89 .queuecommand = ata_scsi_queuecmd,
90 .eh_timed_out = ata_scsi_timed_out,
90 .eh_strategy_handler = ata_scsi_error, 91 .eh_strategy_handler = ata_scsi_error,
91 .can_queue = ATA_DEF_QUEUE, 92 .can_queue = ATA_DEF_QUEUE,
92 .this_id = ATA_SHT_THIS_ID, 93 .this_id = ATA_SHT_THIS_ID,
93 .sg_tablesize = ATA_MAX_PRD, 94 .sg_tablesize = ATA_MAX_PRD,
94 .max_sectors = ATA_MAX_SECTORS,
95 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 95 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
96 .emulated = ATA_SHT_EMULATED, 96 .emulated = ATA_SHT_EMULATED,
97 .use_clustering = ATA_SHT_USE_CLUSTERING, 97 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c
index d8472563fde8..4aaccd53e736 100644
--- a/drivers/scsi/sata_svw.c
+++ b/drivers/scsi/sata_svw.c
@@ -288,11 +288,11 @@ static struct scsi_host_template k2_sata_sht = {
288 .name = DRV_NAME, 288 .name = DRV_NAME,
289 .ioctl = ata_scsi_ioctl, 289 .ioctl = ata_scsi_ioctl,
290 .queuecommand = ata_scsi_queuecmd, 290 .queuecommand = ata_scsi_queuecmd,
291 .eh_timed_out = ata_scsi_timed_out,
291 .eh_strategy_handler = ata_scsi_error, 292 .eh_strategy_handler = ata_scsi_error,
292 .can_queue = ATA_DEF_QUEUE, 293 .can_queue = ATA_DEF_QUEUE,
293 .this_id = ATA_SHT_THIS_ID, 294 .this_id = ATA_SHT_THIS_ID,
294 .sg_tablesize = LIBATA_MAX_PRD, 295 .sg_tablesize = LIBATA_MAX_PRD,
295 .max_sectors = ATA_MAX_SECTORS,
296 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 296 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
297 .emulated = ATA_SHT_EMULATED, 297 .emulated = ATA_SHT_EMULATED,
298 .use_clustering = ATA_SHT_USE_CLUSTERING, 298 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index bc87c16c80d2..9f8a76815402 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -174,7 +174,7 @@ static void pdc20621_get_from_dimm(struct ata_probe_ent *pe,
174static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, 174static void pdc20621_put_to_dimm(struct ata_probe_ent *pe,
175 void *psource, u32 offset, u32 size); 175 void *psource, u32 offset, u32 size);
176static void pdc20621_irq_clear(struct ata_port *ap); 176static void pdc20621_irq_clear(struct ata_port *ap);
177static int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc); 177static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc);
178 178
179 179
180static struct scsi_host_template pdc_sata_sht = { 180static struct scsi_host_template pdc_sata_sht = {
@@ -182,11 +182,11 @@ static struct scsi_host_template pdc_sata_sht = {
182 .name = DRV_NAME, 182 .name = DRV_NAME,
183 .ioctl = ata_scsi_ioctl, 183 .ioctl = ata_scsi_ioctl,
184 .queuecommand = ata_scsi_queuecmd, 184 .queuecommand = ata_scsi_queuecmd,
185 .eh_timed_out = ata_scsi_timed_out,
185 .eh_strategy_handler = ata_scsi_error, 186 .eh_strategy_handler = ata_scsi_error,
186 .can_queue = ATA_DEF_QUEUE, 187 .can_queue = ATA_DEF_QUEUE,
187 .this_id = ATA_SHT_THIS_ID, 188 .this_id = ATA_SHT_THIS_ID,
188 .sg_tablesize = LIBATA_MAX_PRD, 189 .sg_tablesize = LIBATA_MAX_PRD,
189 .max_sectors = ATA_MAX_SECTORS,
190 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 190 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
191 .emulated = ATA_SHT_EMULATED, 191 .emulated = ATA_SHT_EMULATED,
192 .use_clustering = ATA_SHT_USE_CLUSTERING, 192 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -460,7 +460,7 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
460 unsigned int i, idx, total_len = 0, sgt_len; 460 unsigned int i, idx, total_len = 0, sgt_len;
461 u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ]; 461 u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
462 462
463 assert(qc->flags & ATA_QCFLAG_DMAMAP); 463 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
464 464
465 VPRINTK("ata%u: ENTER\n", ap->id); 465 VPRINTK("ata%u: ENTER\n", ap->id);
466 466
@@ -678,7 +678,7 @@ static void pdc20621_packet_start(struct ata_queued_cmd *qc)
678 } 678 }
679} 679}
680 680
681static int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc) 681static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc)
682{ 682{
683 switch (qc->tf.protocol) { 683 switch (qc->tf.protocol) {
684 case ATA_PROT_DMA: 684 case ATA_PROT_DMA:
@@ -866,26 +866,12 @@ static void pdc_eng_timeout(struct ata_port *ap)
866 spin_lock_irqsave(&host_set->lock, flags); 866 spin_lock_irqsave(&host_set->lock, flags);
867 867
868 qc = ata_qc_from_tag(ap, ap->active_tag); 868 qc = ata_qc_from_tag(ap, ap->active_tag);
869 if (!qc) {
870 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
871 ap->id);
872 goto out;
873 }
874
875 /* hack alert! We cannot use the supplied completion
876 * function from inside the ->eh_strategy_handler() thread.
877 * libata is the only user of ->eh_strategy_handler() in
878 * any kernel, so the default scsi_done() assumes it is
879 * not being called from the SCSI EH.
880 */
881 qc->scsidone = scsi_finish_command;
882 869
883 switch (qc->tf.protocol) { 870 switch (qc->tf.protocol) {
884 case ATA_PROT_DMA: 871 case ATA_PROT_DMA:
885 case ATA_PROT_NODATA: 872 case ATA_PROT_NODATA:
886 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 873 printk(KERN_ERR "ata%u: command timeout\n", ap->id);
887 qc->err_mask |= __ac_err_mask(ata_wait_idle(ap)); 874 qc->err_mask |= __ac_err_mask(ata_wait_idle(ap));
888 ata_qc_complete(qc);
889 break; 875 break;
890 876
891 default: 877 default:
@@ -895,12 +881,11 @@ static void pdc_eng_timeout(struct ata_port *ap)
895 ap->id, qc->tf.command, drv_stat); 881 ap->id, qc->tf.command, drv_stat);
896 882
897 qc->err_mask |= ac_err_mask(drv_stat); 883 qc->err_mask |= ac_err_mask(drv_stat);
898 ata_qc_complete(qc);
899 break; 884 break;
900 } 885 }
901 886
902out:
903 spin_unlock_irqrestore(&host_set->lock, flags); 887 spin_unlock_irqrestore(&host_set->lock, flags);
888 ata_eh_qc_complete(qc);
904 DPRINTK("EXIT\n"); 889 DPRINTK("EXIT\n");
905} 890}
906 891
diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c
index 9635ca700977..37a487b7d655 100644
--- a/drivers/scsi/sata_uli.c
+++ b/drivers/scsi/sata_uli.c
@@ -75,11 +75,11 @@ static struct scsi_host_template uli_sht = {
75 .name = DRV_NAME, 75 .name = DRV_NAME,
76 .ioctl = ata_scsi_ioctl, 76 .ioctl = ata_scsi_ioctl,
77 .queuecommand = ata_scsi_queuecmd, 77 .queuecommand = ata_scsi_queuecmd,
78 .eh_timed_out = ata_scsi_timed_out,
78 .eh_strategy_handler = ata_scsi_error, 79 .eh_strategy_handler = ata_scsi_error,
79 .can_queue = ATA_DEF_QUEUE, 80 .can_queue = ATA_DEF_QUEUE,
80 .this_id = ATA_SHT_THIS_ID, 81 .this_id = ATA_SHT_THIS_ID,
81 .sg_tablesize = LIBATA_MAX_PRD, 82 .sg_tablesize = LIBATA_MAX_PRD,
82 .max_sectors = ATA_MAX_SECTORS,
83 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 83 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
84 .emulated = ATA_SHT_EMULATED, 84 .emulated = ATA_SHT_EMULATED,
85 .use_clustering = ATA_SHT_USE_CLUSTERING, 85 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
index 6d5b0a794cfd..ff65a0b0457f 100644
--- a/drivers/scsi/sata_via.c
+++ b/drivers/scsi/sata_via.c
@@ -94,11 +94,11 @@ static struct scsi_host_template svia_sht = {
94 .name = DRV_NAME, 94 .name = DRV_NAME,
95 .ioctl = ata_scsi_ioctl, 95 .ioctl = ata_scsi_ioctl,
96 .queuecommand = ata_scsi_queuecmd, 96 .queuecommand = ata_scsi_queuecmd,
97 .eh_timed_out = ata_scsi_timed_out,
97 .eh_strategy_handler = ata_scsi_error, 98 .eh_strategy_handler = ata_scsi_error,
98 .can_queue = ATA_DEF_QUEUE, 99 .can_queue = ATA_DEF_QUEUE,
99 .this_id = ATA_SHT_THIS_ID, 100 .this_id = ATA_SHT_THIS_ID,
100 .sg_tablesize = LIBATA_MAX_PRD, 101 .sg_tablesize = LIBATA_MAX_PRD,
101 .max_sectors = ATA_MAX_SECTORS,
102 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 102 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
103 .emulated = ATA_SHT_EMULATED, 103 .emulated = ATA_SHT_EMULATED,
104 .use_clustering = ATA_SHT_USE_CLUSTERING, 104 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index e484e8db6810..b574379a7a82 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -251,11 +251,11 @@ static struct scsi_host_template vsc_sata_sht = {
251 .name = DRV_NAME, 251 .name = DRV_NAME,
252 .ioctl = ata_scsi_ioctl, 252 .ioctl = ata_scsi_ioctl,
253 .queuecommand = ata_scsi_queuecmd, 253 .queuecommand = ata_scsi_queuecmd,
254 .eh_timed_out = ata_scsi_timed_out,
254 .eh_strategy_handler = ata_scsi_error, 255 .eh_strategy_handler = ata_scsi_error,
255 .can_queue = ATA_DEF_QUEUE, 256 .can_queue = ATA_DEF_QUEUE,
256 .this_id = ATA_SHT_THIS_ID, 257 .this_id = ATA_SHT_THIS_ID,
257 .sg_tablesize = LIBATA_MAX_PRD, 258 .sg_tablesize = LIBATA_MAX_PRD,
258 .max_sectors = ATA_MAX_SECTORS,
259 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 259 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
260 .emulated = ATA_SHT_EMULATED, 260 .emulated = ATA_SHT_EMULATED,
261 .use_clustering = ATA_SHT_USE_CLUSTERING, 261 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index ff82ccfbb106..5d169a2881b9 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -584,8 +584,7 @@ static int scsi_request_sense(struct scsi_cmnd *scmd)
584 * keep a list of pending commands for final completion, and once we 584 * keep a list of pending commands for final completion, and once we
585 * are ready to leave error handling we handle completion for real. 585 * are ready to leave error handling we handle completion for real.
586 **/ 586 **/
587static void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, 587void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
588 struct list_head *done_q)
589{ 588{
590 scmd->device->host->host_failed--; 589 scmd->device->host->host_failed--;
591 scmd->eh_eflags = 0; 590 scmd->eh_eflags = 0;
@@ -597,6 +596,7 @@ static void scsi_eh_finish_cmd(struct scsi_cmnd *scmd,
597 scsi_setup_cmd_retry(scmd); 596 scsi_setup_cmd_retry(scmd);
598 list_move_tail(&scmd->eh_entry, done_q); 597 list_move_tail(&scmd->eh_entry, done_q);
599} 598}
599EXPORT_SYMBOL(scsi_eh_finish_cmd);
600 600
601/** 601/**
602 * scsi_eh_get_sense - Get device sense data. 602 * scsi_eh_get_sense - Get device sense data.
@@ -1425,7 +1425,7 @@ static void scsi_eh_ready_devs(struct Scsi_Host *shost,
1425 * @done_q: list_head of processed commands. 1425 * @done_q: list_head of processed commands.
1426 * 1426 *
1427 **/ 1427 **/
1428static void scsi_eh_flush_done_q(struct list_head *done_q) 1428void scsi_eh_flush_done_q(struct list_head *done_q)
1429{ 1429{
1430 struct scsi_cmnd *scmd, *next; 1430 struct scsi_cmnd *scmd, *next;
1431 1431
@@ -1454,6 +1454,7 @@ static void scsi_eh_flush_done_q(struct list_head *done_q)
1454 } 1454 }
1455 } 1455 }
1456} 1456}
1457EXPORT_SYMBOL(scsi_eh_flush_done_q);
1457 1458
1458/** 1459/**
1459 * scsi_unjam_host - Attempt to fix a host which has a cmd that failed. 1460 * scsi_unjam_host - Attempt to fix a host which has a cmd that failed.
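
Exporting scsi_eh_finish_cmd() and scsi_eh_flush_done_q() lets a low-level driver's error handler reuse the midlayer's completion bookkeeping instead of open-coding it. A hedged sketch of how such a handler might call the two helpers; only the two exported calls and their signatures come from the patch, the surrounding function and the header chosen for the prototypes are assumptions:

#include <linux/list.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>        /* assumed home of the newly exported prototypes */

/* Illustrative ->eh_strategy_handler(): walk the failed commands, mark each
 * one finished via the midlayer helper, then complete them all in one pass.
 */
static int example_eh_strategy(struct Scsi_Host *shost)
{
	struct scsi_cmnd *scmd, *next;
	LIST_HEAD(done_q);

	list_for_each_entry_safe(scmd, next, &shost->eh_cmd_q, eh_entry) {
		/* ... driver-specific recovery for scmd would go here ... */
		scsi_eh_finish_cmd(scmd, &done_q);    /* queue for final completion */
	}

	scsi_eh_flush_done_q(&done_q);                /* hand everything back to SCSI */
	return 0;
}
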
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index b3c561abe3f6..89e5413cc2a3 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -582,6 +582,13 @@ config SERIAL_SUNSAB_CONSOLE
582 on your Sparc system as the console, you can do so by answering 582 on your Sparc system as the console, you can do so by answering
583 Y to this option. 583 Y to this option.
584 584
585config SERIAL_SUNHV
586 bool "Sun4v Hypervisor Console support"
587 depends on SPARC64
588 help
589 This driver supports the console device found on SUN4V Sparc
590 systems. Say Y if you want to be able to use this device.
591
585config SERIAL_IP22_ZILOG 592config SERIAL_IP22_ZILOG
586 tristate "IP22 Zilog8530 serial support" 593 tristate "IP22 Zilog8530 serial support"
587 depends on SGI_IP22 594 depends on SGI_IP22
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index eaf8e01db198..50c221af9e6d 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_SERIAL_PXA) += pxa.o
30obj-$(CONFIG_SERIAL_SA1100) += sa1100.o 30obj-$(CONFIG_SERIAL_SA1100) += sa1100.o
31obj-$(CONFIG_SERIAL_S3C2410) += s3c2410.o 31obj-$(CONFIG_SERIAL_S3C2410) += s3c2410.o
32obj-$(CONFIG_SERIAL_SUNCORE) += suncore.o 32obj-$(CONFIG_SERIAL_SUNCORE) += suncore.o
33obj-$(CONFIG_SERIAL_SUNHV) += sunhv.o
33obj-$(CONFIG_SERIAL_SUNZILOG) += sunzilog.o 34obj-$(CONFIG_SERIAL_SUNZILOG) += sunzilog.o
34obj-$(CONFIG_SERIAL_IP22_ZILOG) += ip22zilog.o 35obj-$(CONFIG_SERIAL_IP22_ZILOG) += ip22zilog.o
35obj-$(CONFIG_SERIAL_SUNSU) += sunsu.o 36obj-$(CONFIG_SERIAL_SUNSU) += sunsu.o
diff --git a/drivers/serial/sunhv.c b/drivers/serial/sunhv.c
new file mode 100644
index 000000000000..f137804b3133
--- /dev/null
+++ b/drivers/serial/sunhv.c
@@ -0,0 +1,550 @@
1/* sunhv.c: Serial driver for SUN4V hypervisor console.
2 *
3 * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
4 */
5
6#include <linux/module.h>
7#include <linux/kernel.h>
8#include <linux/errno.h>
9#include <linux/tty.h>
10#include <linux/tty_flip.h>
11#include <linux/major.h>
12#include <linux/circ_buf.h>
13#include <linux/serial.h>
14#include <linux/sysrq.h>
15#include <linux/console.h>
16#include <linux/spinlock.h>
17#include <linux/slab.h>
18#include <linux/delay.h>
19#include <linux/init.h>
20
21#include <asm/hypervisor.h>
22#include <asm/spitfire.h>
23#include <asm/vdev.h>
24#include <asm/oplib.h>
25#include <asm/irq.h>
26
27#if defined(CONFIG_MAGIC_SYSRQ)
28#define SUPPORT_SYSRQ
29#endif
30
31#include <linux/serial_core.h>
32
33#include "suncore.h"
34
35#define CON_BREAK ((long)-1)
36#define CON_HUP ((long)-2)
37
38static inline long hypervisor_con_getchar(long *status)
39{
40 register unsigned long func asm("%o5");
41 register unsigned long arg0 asm("%o0");
42 register unsigned long arg1 asm("%o1");
43
44 func = HV_FAST_CONS_GETCHAR;
45 arg0 = 0;
46 arg1 = 0;
47 __asm__ __volatile__("ta %6"
48 : "=&r" (func), "=&r" (arg0), "=&r" (arg1)
49 : "0" (func), "1" (arg0), "2" (arg1),
50 "i" (HV_FAST_TRAP));
51
52 *status = arg0;
53
54 return (long) arg1;
55}
56
57static inline long hypervisor_con_putchar(long ch)
58{
59 register unsigned long func asm("%o5");
60 register unsigned long arg0 asm("%o0");
61
62 func = HV_FAST_CONS_PUTCHAR;
63 arg0 = ch;
64 __asm__ __volatile__("ta %4"
65 : "=&r" (func), "=&r" (arg0)
66 : "0" (func), "1" (arg0), "i" (HV_FAST_TRAP));
67
68 return (long) arg0;
69}
70
71#define IGNORE_BREAK 0x1
72#define IGNORE_ALL 0x2
73
74static int hung_up = 0;
75
76static struct tty_struct *receive_chars(struct uart_port *port, struct pt_regs *regs)
77{
78 struct tty_struct *tty = NULL;
79 int saw_console_brk = 0;
80 int limit = 10000;
81
82 if (port->info != NULL) /* Unopened serial console */
83 tty = port->info->tty;
84
85 while (limit-- > 0) {
86 long status;
87 long c = hypervisor_con_getchar(&status);
88 unsigned char flag;
89
90 if (status == HV_EWOULDBLOCK)
91 break;
92
93 if (c == CON_BREAK) {
94 if (uart_handle_break(port))
95 continue;
96 saw_console_brk = 1;
97 c = 0;
98 }
99
100 if (c == CON_HUP) {
101 hung_up = 1;
102 uart_handle_dcd_change(port, 0);
103 } else if (hung_up) {
104 hung_up = 0;
105 uart_handle_dcd_change(port, 1);
106 }
107
108 if (tty == NULL) {
109 uart_handle_sysrq_char(port, c, regs);
110 continue;
111 }
112
113 flag = TTY_NORMAL;
114 port->icount.rx++;
115 if (c == CON_BREAK) {
116 port->icount.brk++;
117 if (uart_handle_break(port))
118 continue;
119 flag = TTY_BREAK;
120 }
121
122 if (uart_handle_sysrq_char(port, c, regs))
123 continue;
124
125 if ((port->ignore_status_mask & IGNORE_ALL) ||
126 ((port->ignore_status_mask & IGNORE_BREAK) &&
127 (c == CON_BREAK)))
128 continue;
129
130 tty_insert_flip_char(tty, c, flag);
131 }
132
133 if (saw_console_brk)
134 sun_do_break();
135
136 return tty;
137}
138
139static void transmit_chars(struct uart_port *port)
140{
141 struct circ_buf *xmit;
142
143 if (!port->info)
144 return;
145
146 xmit = &port->info->xmit;
147 if (uart_circ_empty(xmit) || uart_tx_stopped(port))
148 return;
149
150 while (!uart_circ_empty(xmit)) {
151 long status = hypervisor_con_putchar(xmit->buf[xmit->tail]);
152
153 if (status != HV_EOK)
154 break;
155
156 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
157 port->icount.tx++;
158 }
159
160 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
161 uart_write_wakeup(port);
162}
163
164static irqreturn_t sunhv_interrupt(int irq, void *dev_id, struct pt_regs *regs)
165{
166 struct uart_port *port = dev_id;
167 struct tty_struct *tty;
168 unsigned long flags;
169
170 spin_lock_irqsave(&port->lock, flags);
171 tty = receive_chars(port, regs);
172 transmit_chars(port);
173 spin_unlock_irqrestore(&port->lock, flags);
174
175 if (tty)
176 tty_flip_buffer_push(tty);
177
178 return IRQ_HANDLED;
179}
180
181/* port->lock is not held. */
182static unsigned int sunhv_tx_empty(struct uart_port *port)
183{
184 /* Transmitter is always empty for us. If the circ buffer
185 * is non-empty or there is an x_char pending, our caller
186 * will do the right thing and ignore what we return here.
187 */
188 return TIOCSER_TEMT;
189}
190
191/* port->lock held by caller. */
192static void sunhv_set_mctrl(struct uart_port *port, unsigned int mctrl)
193{
194 return;
195}
196
197/* port->lock is held by caller and interrupts are disabled. */
198static unsigned int sunhv_get_mctrl(struct uart_port *port)
199{
200 return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS;
201}
202
203/* port->lock held by caller. */
204static void sunhv_stop_tx(struct uart_port *port)
205{
206 return;
207}
208
209/* port->lock held by caller. */
210static void sunhv_start_tx(struct uart_port *port)
211{
212 struct circ_buf *xmit = &port->info->xmit;
213
214 while (!uart_circ_empty(xmit)) {
215 long status = hypervisor_con_putchar(xmit->buf[xmit->tail]);
216
217 if (status != HV_EOK)
218 break;
219
220 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
221 port->icount.tx++;
222 }
223}
224
225/* port->lock is not held. */
226static void sunhv_send_xchar(struct uart_port *port, char ch)
227{
228 unsigned long flags;
229 int limit = 10000;
230
231 spin_lock_irqsave(&port->lock, flags);
232
233 while (limit-- > 0) {
234 long status = hypervisor_con_putchar(ch);
235 if (status == HV_EOK)
236 break;
237 }
238
239 spin_unlock_irqrestore(&port->lock, flags);
240}
241
242/* port->lock held by caller. */
243static void sunhv_stop_rx(struct uart_port *port)
244{
245}
246
247/* port->lock held by caller. */
248static void sunhv_enable_ms(struct uart_port *port)
249{
250}
251
252/* port->lock is not held. */
253static void sunhv_break_ctl(struct uart_port *port, int break_state)
254{
255 if (break_state) {
256 unsigned long flags;
257 int limit = 1000000;
258
259 spin_lock_irqsave(&port->lock, flags);
260
261 while (limit-- > 0) {
262 long status = hypervisor_con_putchar(CON_BREAK);
263 if (status == HV_EOK)
264 break;
265 udelay(2);
266 }
267
268 spin_unlock_irqrestore(&port->lock, flags);
269 }
270}
271
272/* port->lock is not held. */
273static int sunhv_startup(struct uart_port *port)
274{
275 return 0;
276}
277
278/* port->lock is not held. */
279static void sunhv_shutdown(struct uart_port *port)
280{
281}
282
283/* port->lock is not held. */
284static void sunhv_set_termios(struct uart_port *port, struct termios *termios,
285 struct termios *old)
286{
287 unsigned int baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
288 unsigned int quot = uart_get_divisor(port, baud);
289 unsigned int iflag, cflag;
290 unsigned long flags;
291
292 spin_lock_irqsave(&port->lock, flags);
293
294 iflag = termios->c_iflag;
295 cflag = termios->c_cflag;
296
297 port->ignore_status_mask = 0;
298 if (iflag & IGNBRK)
299 port->ignore_status_mask |= IGNORE_BREAK;
300 if ((cflag & CREAD) == 0)
301 port->ignore_status_mask |= IGNORE_ALL;
302
303 /* XXX */
304 uart_update_timeout(port, cflag,
305 (port->uartclk / (16 * quot)));
306
307 spin_unlock_irqrestore(&port->lock, flags);
308}
309
310static const char *sunhv_type(struct uart_port *port)
311{
312 return "SUN4V HCONS";
313}
314
315static void sunhv_release_port(struct uart_port *port)
316{
317}
318
319static int sunhv_request_port(struct uart_port *port)
320{
321 return 0;
322}
323
324static void sunhv_config_port(struct uart_port *port, int flags)
325{
326}
327
328static int sunhv_verify_port(struct uart_port *port, struct serial_struct *ser)
329{
330 return -EINVAL;
331}
332
333static struct uart_ops sunhv_pops = {
334 .tx_empty = sunhv_tx_empty,
335 .set_mctrl = sunhv_set_mctrl,
336 .get_mctrl = sunhv_get_mctrl,
337 .stop_tx = sunhv_stop_tx,
338 .start_tx = sunhv_start_tx,
339 .send_xchar = sunhv_send_xchar,
340 .stop_rx = sunhv_stop_rx,
341 .enable_ms = sunhv_enable_ms,
342 .break_ctl = sunhv_break_ctl,
343 .startup = sunhv_startup,
344 .shutdown = sunhv_shutdown,
345 .set_termios = sunhv_set_termios,
346 .type = sunhv_type,
347 .release_port = sunhv_release_port,
348 .request_port = sunhv_request_port,
349 .config_port = sunhv_config_port,
350 .verify_port = sunhv_verify_port,
351};
352
353static struct uart_driver sunhv_reg = {
354 .owner = THIS_MODULE,
355 .driver_name = "serial",
356 .devfs_name = "tts/",
357 .dev_name = "ttyS",
358 .major = TTY_MAJOR,
359};
360
361static struct uart_port *sunhv_port;
362
363static inline void sunhv_console_putchar(struct uart_port *port, char c)
364{
365 unsigned long flags;
366 int limit = 1000000;
367
368 spin_lock_irqsave(&port->lock, flags);
369
370 while (limit-- > 0) {
371 long status = hypervisor_con_putchar(c);
372 if (status == HV_EOK)
373 break;
374 udelay(2);
375 }
376
377 spin_unlock_irqrestore(&port->lock, flags);
378}
379
380static void sunhv_console_write(struct console *con, const char *s, unsigned n)
381{
382 struct uart_port *port = sunhv_port;
383 int i;
384
385 for (i = 0; i < n; i++) {
386 if (*s == '\n')
387 sunhv_console_putchar(port, '\r');
388 sunhv_console_putchar(port, *s++);
389 }
390}
391
392static struct console sunhv_console = {
393 .name = "ttyHV",
394 .write = sunhv_console_write,
395 .device = uart_console_device,
396 .flags = CON_PRINTBUFFER,
397 .index = -1,
398 .data = &sunhv_reg,
399};
400
401static inline struct console *SUNHV_CONSOLE(void)
402{
403 if (con_is_present())
404 return NULL;
405
406 sunhv_console.index = 0;
407
408 return &sunhv_console;
409}
410
411static int __init hv_console_compatible(char *buf, int len)
412{
413 while (len) {
414 int this_len;
415
416 if (!strcmp(buf, "qcn"))
417 return 1;
418
419 this_len = strlen(buf) + 1;
420
421 buf += this_len;
422 len -= this_len;
423 }
424
425 return 0;
426}
427
428static unsigned int __init get_interrupt(void)
429{
430 const char *cons_str = "console";
431 const char *compat_str = "compatible";
432 int node = prom_getchild(sun4v_vdev_root);
433 char buf[64];
434 int err, len;
435
436 node = prom_searchsiblings(node, cons_str);
437 if (!node)
438 return 0;
439
440 len = prom_getproplen(node, compat_str);
441 if (len == 0 || len == -1)
442 return 0;
443
444 err = prom_getproperty(node, compat_str, buf, 64);
445 if (err == -1)
446 return 0;
447
448 if (!hv_console_compatible(buf, len))
449 return 0;
450
451	/* Ok, this is the OBP node for the sun4v hypervisor
452 * console device. Decode the interrupt.
453 */
454 return sun4v_vdev_device_interrupt(node);
455}
456
457static int __init sunhv_init(void)
458{
459 struct uart_port *port;
460 int ret;
461
462 if (tlb_type != hypervisor)
463 return -ENODEV;
464
465 port = kmalloc(sizeof(struct uart_port), GFP_KERNEL);
466 if (unlikely(!port))
467 return -ENOMEM;
468
469 memset(port, 0, sizeof(struct uart_port));
470
471 port->line = 0;
472 port->ops = &sunhv_pops;
473 port->type = PORT_SUNHV;
474 port->uartclk = ( 29491200 / 16 ); /* arbitrary */
475
476 /* Set this just to make uart_configure_port() happy. */
477 port->membase = (unsigned char __iomem *) __pa(port);
478
479 port->irq = get_interrupt();
480 if (!port->irq) {
481 kfree(port);
482 return -ENODEV;
483 }
484
485 sunhv_reg.minor = sunserial_current_minor;
486 sunhv_reg.nr = 1;
487
488 ret = uart_register_driver(&sunhv_reg);
489 if (ret < 0) {
490 printk(KERN_ERR "SUNHV: uart_register_driver() failed %d\n",
491 ret);
492 kfree(port);
493
494 return ret;
495 }
496
497 sunhv_reg.tty_driver->name_base = sunhv_reg.minor - 64;
498 sunserial_current_minor += 1;
499
500 sunhv_reg.cons = SUNHV_CONSOLE();
501
502 sunhv_port = port;
503
504 ret = uart_add_one_port(&sunhv_reg, port);
505 if (ret < 0) {
506 printk(KERN_ERR "SUNHV: uart_add_one_port() failed %d\n", ret);
507 sunserial_current_minor -= 1;
508 uart_unregister_driver(&sunhv_reg);
509 kfree(port);
510 sunhv_port = NULL;
511 return -ENODEV;
512 }
513
514 if (request_irq(port->irq, sunhv_interrupt,
515 SA_SHIRQ, "serial(sunhv)", port)) {
516 printk(KERN_ERR "sunhv: Cannot register IRQ\n");
517 uart_remove_one_port(&sunhv_reg, port);
518 sunserial_current_minor -= 1;
519 uart_unregister_driver(&sunhv_reg);
520 kfree(port);
521 sunhv_port = NULL;
522 return -ENODEV;
523 }
524
525 return 0;
526}
527
528static void __exit sunhv_exit(void)
529{
530 struct uart_port *port = sunhv_port;
531
532 BUG_ON(!port);
533
534 free_irq(port->irq, port);
535
536 uart_remove_one_port(&sunhv_reg, port);
537 sunserial_current_minor -= 1;
538
539 uart_unregister_driver(&sunhv_reg);
540
541 kfree(sunhv_port);
542 sunhv_port = NULL;
543}
544
545module_init(sunhv_init);
546module_exit(sunhv_exit);
547
548MODULE_AUTHOR("David S. Miller");
549MODULE_DESCRIPTION("SUN4V Hypervisor console driver");
550MODULE_LICENSE("GPL");
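
The sunsab, sunsu and sunzilog diffs below make the same structural change this new driver starts from: the old *_console_init() helpers that called register_console() directly become *_CONSOLE() functions returning a struct console pointer (or NULL when another device is already the firmware console), and that pointer is stored in the uart_driver's .cons field before uart_add_one_port(), leaving console registration to the serial core. A condensed sketch of the ordering, reusing the sunhv names from above (error handling abbreviated, not a literal copy of sunhv_init()):

	ret = uart_register_driver(&sunhv_reg);
	if (ret < 0)
		return ret;

	/* line numbering starts at the minor this driver was handed */
	sunhv_reg.tty_driver->name_base = sunhv_reg.minor - 64;

	/* NULL when OBP reports a different console device */
	sunhv_reg.cons = SUNHV_CONSOLE();

	/* the serial core registers .cons when the matching port is added */
	ret = uart_add_one_port(&sunhv_reg, port);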
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c
index 85664228a0b6..a2fb0c2fb121 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/serial/sunsab.c
@@ -955,14 +955,13 @@ static struct console sunsab_console = {
955 .index = -1, 955 .index = -1,
956 .data = &sunsab_reg, 956 .data = &sunsab_reg,
957}; 957};
958#define SUNSAB_CONSOLE (&sunsab_console)
959 958
960static void __init sunsab_console_init(void) 959static inline struct console *SUNSAB_CONSOLE(void)
961{ 960{
962 int i; 961 int i;
963 962
964 if (con_is_present()) 963 if (con_is_present())
965 return; 964 return NULL;
966 965
967 for (i = 0; i < num_channels; i++) { 966 for (i = 0; i < num_channels; i++) {
968 int this_minor = sunsab_reg.minor + i; 967 int this_minor = sunsab_reg.minor + i;
@@ -971,13 +970,14 @@ static void __init sunsab_console_init(void)
971 break; 970 break;
972 } 971 }
973 if (i == num_channels) 972 if (i == num_channels)
974 return; 973 return NULL;
975 974
976 sunsab_console.index = i; 975 sunsab_console.index = i;
977 register_console(&sunsab_console); 976
977 return &sunsab_console;
978} 978}
979#else 979#else
980#define SUNSAB_CONSOLE (NULL) 980#define SUNSAB_CONSOLE() (NULL)
981#define sunsab_console_init() do { } while (0) 981#define sunsab_console_init() do { } while (0)
982#endif 982#endif
983 983
@@ -1124,7 +1124,6 @@ static int __init sunsab_init(void)
1124 1124
1125 sunsab_reg.minor = sunserial_current_minor; 1125 sunsab_reg.minor = sunserial_current_minor;
1126 sunsab_reg.nr = num_channels; 1126 sunsab_reg.nr = num_channels;
1127 sunsab_reg.cons = SUNSAB_CONSOLE;
1128 1127
1129 ret = uart_register_driver(&sunsab_reg); 1128 ret = uart_register_driver(&sunsab_reg);
1130 if (ret < 0) { 1129 if (ret < 0) {
@@ -1143,10 +1142,12 @@ static int __init sunsab_init(void)
1143 return ret; 1142 return ret;
1144 } 1143 }
1145 1144
1145 sunsab_reg.tty_driver->name_base = sunsab_reg.minor - 64;
1146
1147 sunsab_reg.cons = SUNSAB_CONSOLE();
1148
1146 sunserial_current_minor += num_channels; 1149 sunserial_current_minor += num_channels;
1147 1150
1148 sunsab_console_init();
1149
1150 for (i = 0; i < num_channels; i++) { 1151 for (i = 0; i < num_channels; i++) {
1151 struct uart_sunsab_port *up = &sunsab_ports[i]; 1152 struct uart_sunsab_port *up = &sunsab_ports[i];
1152 1153
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c
index 4e453fa966ae..46c44b83f57c 100644
--- a/drivers/serial/sunsu.c
+++ b/drivers/serial/sunsu.c
@@ -1280,6 +1280,7 @@ static int __init sunsu_kbd_ms_init(struct uart_sunsu_port *up, int channel)
1280 struct serio *serio; 1280 struct serio *serio;
1281#endif 1281#endif
1282 1282
1283 spin_lock_init(&up->port.lock);
1283 up->port.line = channel; 1284 up->port.line = channel;
1284 up->port.type = PORT_UNKNOWN; 1285 up->port.type = PORT_UNKNOWN;
1285 up->port.uartclk = (SU_BASE_BAUD * 16); 1286 up->port.uartclk = (SU_BASE_BAUD * 16);
@@ -1464,18 +1465,17 @@ static struct console sunsu_cons = {
1464 .index = -1, 1465 .index = -1,
1465 .data = &sunsu_reg, 1466 .data = &sunsu_reg,
1466}; 1467};
1467#define SUNSU_CONSOLE (&sunsu_cons)
1468 1468
1469/* 1469/*
1470 * Register console. 1470 * Register console.
1471 */ 1471 */
1472 1472
1473static int __init sunsu_serial_console_init(void) 1473static inline struct console *SUNSU_CONSOLE(void)
1474{ 1474{
1475 int i; 1475 int i;
1476 1476
1477 if (con_is_present()) 1477 if (con_is_present())
1478 return 0; 1478 return NULL;
1479 1479
1480 for (i = 0; i < UART_NR; i++) { 1480 for (i = 0; i < UART_NR; i++) {
1481 int this_minor = sunsu_reg.minor + i; 1481 int this_minor = sunsu_reg.minor + i;
@@ -1484,16 +1484,16 @@ static int __init sunsu_serial_console_init(void)
1484 break; 1484 break;
1485 } 1485 }
1486 if (i == UART_NR) 1486 if (i == UART_NR)
1487 return 0; 1487 return NULL;
1488 if (sunsu_ports[i].port_node == 0) 1488 if (sunsu_ports[i].port_node == 0)
1489 return 0; 1489 return NULL;
1490 1490
1491 sunsu_cons.index = i; 1491 sunsu_cons.index = i;
1492 register_console(&sunsu_cons); 1492
1493 return 0; 1493 return &sunsu_cons;
1494} 1494}
1495#else 1495#else
1496#define SUNSU_CONSOLE (NULL) 1496#define SUNSU_CONSOLE() (NULL)
1497#define sunsu_serial_console_init() do { } while (0) 1497#define sunsu_serial_console_init() do { } while (0)
1498#endif 1498#endif
1499 1499
@@ -1510,6 +1510,7 @@ static int __init sunsu_serial_init(void)
1510 up->su_type == SU_PORT_KBD) 1510 up->su_type == SU_PORT_KBD)
1511 continue; 1511 continue;
1512 1512
1513 spin_lock_init(&up->port.lock);
1513 up->port.flags |= UPF_BOOT_AUTOCONF; 1514 up->port.flags |= UPF_BOOT_AUTOCONF;
1514 up->port.type = PORT_UNKNOWN; 1515 up->port.type = PORT_UNKNOWN;
1515 up->port.uartclk = (SU_BASE_BAUD * 16); 1516 up->port.uartclk = (SU_BASE_BAUD * 16);
@@ -1523,16 +1524,19 @@ static int __init sunsu_serial_init(void)
1523 } 1524 }
1524 1525
1525 sunsu_reg.minor = sunserial_current_minor; 1526 sunsu_reg.minor = sunserial_current_minor;
1526 sunserial_current_minor += instance;
1527 1527
1528 sunsu_reg.nr = instance; 1528 sunsu_reg.nr = instance;
1529 sunsu_reg.cons = SUNSU_CONSOLE;
1530 1529
1531 ret = uart_register_driver(&sunsu_reg); 1530 ret = uart_register_driver(&sunsu_reg);
1532 if (ret < 0) 1531 if (ret < 0)
1533 return ret; 1532 return ret;
1534 1533
1535 sunsu_serial_console_init(); 1534 sunsu_reg.tty_driver->name_base = sunsu_reg.minor - 64;
1535
1536 sunserial_current_minor += instance;
1537
1538 sunsu_reg.cons = SUNSU_CONSOLE();
1539
1536 for (i = 0; i < UART_NR; i++) { 1540 for (i = 0; i < UART_NR; i++) {
1537 struct uart_sunsu_port *up = &sunsu_ports[i]; 1541 struct uart_sunsu_port *up = &sunsu_ports[i];
1538 1542
diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c
index 5cc4d4c2935c..10b35c6f287d 100644
--- a/drivers/serial/sunzilog.c
+++ b/drivers/serial/sunzilog.c
@@ -1390,7 +1390,6 @@ static struct console sunzilog_console = {
1390 .index = -1, 1390 .index = -1,
1391 .data = &sunzilog_reg, 1391 .data = &sunzilog_reg,
1392}; 1392};
1393#define SUNZILOG_CONSOLE (&sunzilog_console)
1394 1393
1395static int __init sunzilog_console_init(void) 1394static int __init sunzilog_console_init(void)
1396{ 1395{
@@ -1413,8 +1412,31 @@ static int __init sunzilog_console_init(void)
1413 register_console(&sunzilog_console); 1412 register_console(&sunzilog_console);
1414 return 0; 1413 return 0;
1415} 1414}
1415
1416static inline struct console *SUNZILOG_CONSOLE(void)
1417{
1418 int i;
1419
1420 if (con_is_present())
1421 return NULL;
1422
1423 for (i = 0; i < NUM_CHANNELS; i++) {
1424 int this_minor = sunzilog_reg.minor + i;
1425
1426 if ((this_minor - 64) == (serial_console - 1))
1427 break;
1428 }
1429 if (i == NUM_CHANNELS)
1430 return NULL;
1431
1432 sunzilog_console.index = i;
1433 sunzilog_port_table[i].flags |= SUNZILOG_FLAG_IS_CONS;
1434
1435 return &sunzilog_console;
1436}
1437
1416#else 1438#else
1417#define SUNZILOG_CONSOLE (NULL) 1439#define SUNZILOG_CONSOLE() (NULL)
1418#define sunzilog_console_init() do { } while (0) 1440#define sunzilog_console_init() do { } while (0)
1419#endif 1441#endif
1420 1442
@@ -1666,14 +1688,15 @@ static int __init sunzilog_ports_init(void)
1666 } 1688 }
1667 1689
1668 sunzilog_reg.nr = uart_count; 1690 sunzilog_reg.nr = uart_count;
1669 sunzilog_reg.cons = SUNZILOG_CONSOLE;
1670
1671 sunzilog_reg.minor = sunserial_current_minor; 1691 sunzilog_reg.minor = sunserial_current_minor;
1672 sunserial_current_minor += uart_count;
1673 1692
1674 ret = uart_register_driver(&sunzilog_reg); 1693 ret = uart_register_driver(&sunzilog_reg);
1675 if (ret == 0) { 1694 if (ret == 0) {
1676 sunzilog_console_init(); 1695 sunzilog_reg.tty_driver->name_base = sunzilog_reg.minor - 64;
1696 sunzilog_reg.cons = SUNZILOG_CONSOLE();
1697
1698 sunserial_current_minor += uart_count;
1699
1677 for (i = 0; i < NUM_CHANNELS; i++) { 1700 for (i = 0; i < NUM_CHANNELS; i++) {
1678 struct uart_sunzilog_port *up = &sunzilog_port_table[i]; 1701 struct uart_sunzilog_port *up = &sunzilog_port_table[i];
1679 1702
diff --git a/fs/jfs/Makefile b/fs/jfs/Makefile
index 6f1e0e95587a..3adb6395e42d 100644
--- a/fs/jfs/Makefile
+++ b/fs/jfs/Makefile
@@ -8,7 +8,8 @@ jfs-y := super.o file.o inode.o namei.o jfs_mount.o jfs_umount.o \
8 jfs_xtree.o jfs_imap.o jfs_debug.o jfs_dmap.o \ 8 jfs_xtree.o jfs_imap.o jfs_debug.o jfs_dmap.o \
9 jfs_unicode.o jfs_dtree.o jfs_inode.o \ 9 jfs_unicode.o jfs_dtree.o jfs_inode.o \
10 jfs_extent.o symlink.o jfs_metapage.o \ 10 jfs_extent.o symlink.o jfs_metapage.o \
11 jfs_logmgr.o jfs_txnmgr.o jfs_uniupr.o resize.o xattr.o 11 jfs_logmgr.o jfs_txnmgr.o jfs_uniupr.o \
12 resize.o xattr.o ioctl.o
12 13
13jfs-$(CONFIG_JFS_POSIX_ACL) += acl.o 14jfs-$(CONFIG_JFS_POSIX_ACL) += acl.o
14 15
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index 461e4934ca7c..e2281300979c 100644
--- a/fs/jfs/acl.c
+++ b/fs/jfs/acl.c
@@ -183,6 +183,9 @@ cleanup:
183 posix_acl_release(acl); 183 posix_acl_release(acl);
184 } else 184 } else
185 inode->i_mode &= ~current->fs->umask; 185 inode->i_mode &= ~current->fs->umask;
186
187 JFS_IP(inode)->mode2 = (JFS_IP(inode)->mode2 & 0xffff0000) |
188 inode->i_mode;
186 189
187 return rc; 190 return rc;
188} 191}
@@ -207,12 +210,12 @@ static int jfs_acl_chmod(struct inode *inode)
207 rc = posix_acl_chmod_masq(clone, inode->i_mode); 210 rc = posix_acl_chmod_masq(clone, inode->i_mode);
208 if (!rc) { 211 if (!rc) {
209 tid_t tid = txBegin(inode->i_sb, 0); 212 tid_t tid = txBegin(inode->i_sb, 0);
210 down(&JFS_IP(inode)->commit_sem); 213 mutex_lock(&JFS_IP(inode)->commit_mutex);
211 rc = jfs_set_acl(tid, inode, ACL_TYPE_ACCESS, clone); 214 rc = jfs_set_acl(tid, inode, ACL_TYPE_ACCESS, clone);
212 if (!rc) 215 if (!rc)
213 rc = txCommit(tid, 1, &inode, 0); 216 rc = txCommit(tid, 1, &inode, 0);
214 txEnd(tid); 217 txEnd(tid);
215 up(&JFS_IP(inode)->commit_sem); 218 mutex_unlock(&JFS_IP(inode)->commit_mutex);
216 } 219 }
217 220
218 posix_acl_release(clone); 221 posix_acl_release(clone);
diff --git a/fs/jfs/file.c b/fs/jfs/file.c
index c2c19c9ed9a4..e1ac6e497e2b 100644
--- a/fs/jfs/file.c
+++ b/fs/jfs/file.c
@@ -113,4 +113,5 @@ struct file_operations jfs_file_operations = {
113 .sendfile = generic_file_sendfile, 113 .sendfile = generic_file_sendfile,
114 .fsync = jfs_fsync, 114 .fsync = jfs_fsync,
115 .release = jfs_release, 115 .release = jfs_release,
116 .ioctl = jfs_ioctl,
116}; 117};
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 9f942ca8e4e3..51a5fed90cca 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -55,6 +55,7 @@ void jfs_read_inode(struct inode *inode)
55 inode->i_op = &jfs_file_inode_operations; 55 inode->i_op = &jfs_file_inode_operations;
56 init_special_inode(inode, inode->i_mode, inode->i_rdev); 56 init_special_inode(inode, inode->i_mode, inode->i_rdev);
57 } 57 }
58 jfs_set_inode_flags(inode);
58} 59}
59 60
60/* 61/*
@@ -89,16 +90,16 @@ int jfs_commit_inode(struct inode *inode, int wait)
89 } 90 }
90 91
91 tid = txBegin(inode->i_sb, COMMIT_INODE); 92 tid = txBegin(inode->i_sb, COMMIT_INODE);
92 down(&JFS_IP(inode)->commit_sem); 93 mutex_lock(&JFS_IP(inode)->commit_mutex);
93 94
94 /* 95 /*
95 * Retest inode state after taking commit_sem 96 * Retest inode state after taking commit_mutex
96 */ 97 */
97 if (inode->i_nlink && test_cflag(COMMIT_Dirty, inode)) 98 if (inode->i_nlink && test_cflag(COMMIT_Dirty, inode))
98 rc = txCommit(tid, 1, &inode, wait ? COMMIT_SYNC : 0); 99 rc = txCommit(tid, 1, &inode, wait ? COMMIT_SYNC : 0);
99 100
100 txEnd(tid); 101 txEnd(tid);
101 up(&JFS_IP(inode)->commit_sem); 102 mutex_unlock(&JFS_IP(inode)->commit_mutex);
102 return rc; 103 return rc;
103} 104}
104 105
@@ -335,18 +336,18 @@ void jfs_truncate_nolock(struct inode *ip, loff_t length)
335 tid = txBegin(ip->i_sb, 0); 336 tid = txBegin(ip->i_sb, 0);
336 337
337 /* 338 /*
338 * The commit_sem cannot be taken before txBegin. 339 * The commit_mutex cannot be taken before txBegin.
339 * txBegin may block and there is a chance the inode 340 * txBegin may block and there is a chance the inode
340 * could be marked dirty and need to be committed 341 * could be marked dirty and need to be committed
341 * before txBegin unblocks 342 * before txBegin unblocks
342 */ 343 */
343 down(&JFS_IP(ip)->commit_sem); 344 mutex_lock(&JFS_IP(ip)->commit_mutex);
344 345
345 newsize = xtTruncate(tid, ip, length, 346 newsize = xtTruncate(tid, ip, length,
346 COMMIT_TRUNCATE | COMMIT_PWMAP); 347 COMMIT_TRUNCATE | COMMIT_PWMAP);
347 if (newsize < 0) { 348 if (newsize < 0) {
348 txEnd(tid); 349 txEnd(tid);
349 up(&JFS_IP(ip)->commit_sem); 350 mutex_unlock(&JFS_IP(ip)->commit_mutex);
350 break; 351 break;
351 } 352 }
352 353
@@ -355,7 +356,7 @@ void jfs_truncate_nolock(struct inode *ip, loff_t length)
355 356
356 txCommit(tid, 1, &ip, 0); 357 txCommit(tid, 1, &ip, 0);
357 txEnd(tid); 358 txEnd(tid);
358 up(&JFS_IP(ip)->commit_sem); 359 mutex_unlock(&JFS_IP(ip)->commit_mutex);
359 } while (newsize > length); /* Truncate isn't always atomic */ 360 } while (newsize > length); /* Truncate isn't always atomic */
360} 361}
361 362
diff --git a/fs/jfs/ioctl.c b/fs/jfs/ioctl.c
new file mode 100644
index 000000000000..67b3774820eb
--- /dev/null
+++ b/fs/jfs/ioctl.c
@@ -0,0 +1,107 @@
1/*
2 * linux/fs/jfs/ioctl.c
3 *
4 * Copyright (C) 2006 Herbert Poetzl
5 * adapted from Remy Card's ext2/ioctl.c
6 */
7
8#include <linux/fs.h>
9#include <linux/ext2_fs.h>
10#include <linux/ctype.h>
11#include <linux/capability.h>
12#include <linux/time.h>
13#include <asm/current.h>
14#include <asm/uaccess.h>
15
16#include "jfs_incore.h"
17#include "jfs_dinode.h"
18#include "jfs_inode.h"
19
20
21static struct {
22 long jfs_flag;
23 long ext2_flag;
24} jfs_map[] = {
25 {JFS_NOATIME_FL, EXT2_NOATIME_FL},
26 {JFS_DIRSYNC_FL, EXT2_DIRSYNC_FL},
27 {JFS_SYNC_FL, EXT2_SYNC_FL},
28 {JFS_SECRM_FL, EXT2_SECRM_FL},
29 {JFS_UNRM_FL, EXT2_UNRM_FL},
30 {JFS_APPEND_FL, EXT2_APPEND_FL},
31 {JFS_IMMUTABLE_FL, EXT2_IMMUTABLE_FL},
32 {0, 0},
33};
34
35static long jfs_map_ext2(unsigned long flags, int from)
36{
37 int index=0;
38 long mapped=0;
39
40 while (jfs_map[index].jfs_flag) {
41 if (from) {
42 if (jfs_map[index].ext2_flag & flags)
43 mapped |= jfs_map[index].jfs_flag;
44 } else {
45 if (jfs_map[index].jfs_flag & flags)
46 mapped |= jfs_map[index].ext2_flag;
47 }
48 index++;
49 }
50 return mapped;
51}
52
53
54int jfs_ioctl(struct inode * inode, struct file * filp, unsigned int cmd,
55 unsigned long arg)
56{
57 struct jfs_inode_info *jfs_inode = JFS_IP(inode);
58 unsigned int flags;
59
60 switch (cmd) {
61 case JFS_IOC_GETFLAGS:
62 flags = jfs_inode->mode2 & JFS_FL_USER_VISIBLE;
63 flags = jfs_map_ext2(flags, 0);
64 return put_user(flags, (int __user *) arg);
65 case JFS_IOC_SETFLAGS: {
66 unsigned int oldflags;
67
68 if (IS_RDONLY(inode))
69 return -EROFS;
70
71 if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
72 return -EACCES;
73
74 if (get_user(flags, (int __user *) arg))
75 return -EFAULT;
76
77 flags = jfs_map_ext2(flags, 1);
78 if (!S_ISDIR(inode->i_mode))
79 flags &= ~JFS_DIRSYNC_FL;
80
81 oldflags = jfs_inode->mode2;
82
83 /*
84 * The IMMUTABLE and APPEND_ONLY flags can only be changed by
85 * the relevant capability.
86 */
87 if ((oldflags & JFS_IMMUTABLE_FL) ||
88 ((flags ^ oldflags) &
89 (JFS_APPEND_FL | JFS_IMMUTABLE_FL))) {
90 if (!capable(CAP_LINUX_IMMUTABLE))
91 return -EPERM;
92 }
93
94 flags = flags & JFS_FL_USER_MODIFIABLE;
95 flags |= oldflags & ~JFS_FL_USER_MODIFIABLE;
96 jfs_inode->mode2 = flags;
97
98 jfs_set_inode_flags(inode);
99 inode->i_ctime = CURRENT_TIME_SEC;
100 mark_inode_dirty(inode);
101 return 0;
102 }
103 default:
104 return -ENOTTY;
105 }
106}
107
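
Since JFS_IOC_GETFLAGS/SETFLAGS reuse the ext2/ext3 ioctl numbers and jfs_map_ext2() translates the flag bits, existing chattr/lsattr-style userspace works unchanged. A hypothetical caller (illustrative only, not part of the patch; the helper name and error handling are invented for the example):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ext2_fs.h>	/* EXT2_IOC_GETFLAGS/SETFLAGS match JFS_IOC_GETFLAGS/SETFLAGS */

static int make_immutable(const char *path)
{
	int fd = open(path, O_RDONLY);
	int flags = 0;

	if (fd < 0)
		return -1;
	if (ioctl(fd, EXT2_IOC_GETFLAGS, &flags) < 0)	/* returned as EXT2_*_FL bits */
		goto fail;
	flags |= EXT2_IMMUTABLE_FL;	/* setting this bit needs CAP_LINUX_IMMUTABLE */
	if (ioctl(fd, EXT2_IOC_SETFLAGS, &flags) < 0)
		goto fail;
	close(fd);
	return 0;
fail:
	close(fd);
	return -1;
}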
diff --git a/fs/jfs/jfs_dinode.h b/fs/jfs/jfs_dinode.h
index 580a3258449b..9f2572aea561 100644
--- a/fs/jfs/jfs_dinode.h
+++ b/fs/jfs/jfs_dinode.h
@@ -139,13 +139,36 @@ struct dinode {
139 139
140/* more extended mode bits: attributes for OS/2 */ 140/* more extended mode bits: attributes for OS/2 */
141#define IREADONLY 0x02000000 /* no write access to file */ 141#define IREADONLY 0x02000000 /* no write access to file */
142#define IARCHIVE 0x40000000 /* file archive bit */
143#define ISYSTEM 0x08000000 /* system file */
144#define IHIDDEN 0x04000000 /* hidden file */ 142#define IHIDDEN 0x04000000 /* hidden file */
145#define IRASH 0x4E000000 /* mask for changeable attributes */ 143#define ISYSTEM 0x08000000 /* system file */
146#define INEWNAME 0x80000000 /* non-8.3 filename format */ 144
147#define IDIRECTORY 0x20000000 /* directory (shadow of real bit) */ 145#define IDIRECTORY 0x20000000 /* directory (shadow of real bit) */
146#define IARCHIVE 0x40000000 /* file archive bit */
147#define INEWNAME 0x80000000 /* non-8.3 filename format */
148
149#define IRASH 0x4E000000 /* mask for changeable attributes */
148#define ATTRSHIFT 25 /* bits to shift to move attribute 150#define ATTRSHIFT 25 /* bits to shift to move attribute
149 specification to mode position */ 151 specification to mode position */
150 152
153/* extended attributes for Linux */
154
155#define JFS_NOATIME_FL 0x00080000 /* do not update atime */
156
157#define JFS_DIRSYNC_FL 0x00100000 /* dirsync behaviour */
158#define JFS_SYNC_FL 0x00200000 /* Synchronous updates */
159#define JFS_SECRM_FL 0x00400000 /* Secure deletion */
160#define JFS_UNRM_FL 0x00800000 /* allow for undelete */
161
162#define JFS_APPEND_FL 0x01000000 /* writes to file may only append */
163#define JFS_IMMUTABLE_FL 0x02000000 /* Immutable file */
164
165#define JFS_FL_USER_VISIBLE 0x03F80000
166#define JFS_FL_USER_MODIFIABLE 0x03F80000
167#define JFS_FL_INHERIT 0x03C80000
168
169/* These are identical to EXT[23]_IOC_GETFLAGS/SETFLAGS */
170#define JFS_IOC_GETFLAGS _IOR('f', 1, long)
171#define JFS_IOC_SETFLAGS _IOW('f', 2, long)
172
173
151#endif /*_H_JFS_DINODE */ 174#endif /*_H_JFS_DINODE */
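
A quick consistency check on the new masks: the seven flag bits OR together to 0x00080000 | 0x00100000 | 0x00200000 | 0x00400000 | 0x00800000 | 0x01000000 | 0x02000000 = 0x03F80000, which is exactly JFS_FL_USER_VISIBLE and JFS_FL_USER_MODIFIABLE, while JFS_FL_INHERIT = 0x03C80000 is that value minus JFS_DIRSYNC_FL (0x00100000) and JFS_SYNC_FL (0x00200000), so new inodes inherit everything from the parent except the sync-behaviour bits (see the ialloc() change in jfs_inode.c below).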
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index 79b5404db100..c161c98954e0 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -64,9 +64,9 @@
64 * to the persistent bitmaps in dmaps) is guarded by (busy) buffers. 64 * to the persistent bitmaps in dmaps) is guarded by (busy) buffers.
65 */ 65 */
66 66
67#define BMAP_LOCK_INIT(bmp) init_MUTEX(&bmp->db_bmaplock) 67#define BMAP_LOCK_INIT(bmp) mutex_init(&bmp->db_bmaplock)
68#define BMAP_LOCK(bmp) down(&bmp->db_bmaplock) 68#define BMAP_LOCK(bmp) mutex_lock(&bmp->db_bmaplock)
69#define BMAP_UNLOCK(bmp) up(&bmp->db_bmaplock) 69#define BMAP_UNLOCK(bmp) mutex_unlock(&bmp->db_bmaplock)
70 70
71/* 71/*
72 * forward references 72 * forward references
@@ -125,7 +125,7 @@ static int dbGetL2AGSize(s64 nblocks);
125 * into the table, with the table elements yielding the maximum 125 * into the table, with the table elements yielding the maximum
126 * binary buddy of free bits within the character. 126 * binary buddy of free bits within the character.
127 */ 127 */
128static s8 budtab[256] = { 128static const s8 budtab[256] = {
129 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 129 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
130 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 130 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
131 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 131 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
diff --git a/fs/jfs/jfs_dmap.h b/fs/jfs/jfs_dmap.h
index 32e25884e7e8..8b14cc8e0228 100644
--- a/fs/jfs/jfs_dmap.h
+++ b/fs/jfs/jfs_dmap.h
@@ -243,7 +243,7 @@ struct dbmap {
243struct bmap { 243struct bmap {
244 struct dbmap db_bmap; /* on-disk aggregate map descriptor */ 244 struct dbmap db_bmap; /* on-disk aggregate map descriptor */
245 struct inode *db_ipbmap; /* ptr to aggregate map incore inode */ 245 struct inode *db_ipbmap; /* ptr to aggregate map incore inode */
246 struct semaphore db_bmaplock; /* aggregate map lock */ 246 struct mutex db_bmaplock; /* aggregate map lock */
247 atomic_t db_active[MAXAG]; /* count of active, open files in AG */ 247 atomic_t db_active[MAXAG]; /* count of active, open files in AG */
248 u32 *db_DBmap; 248 u32 *db_DBmap;
249}; 249};
diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
index 404f33eae507..6c3f08319846 100644
--- a/fs/jfs/jfs_dtree.c
+++ b/fs/jfs/jfs_dtree.c
@@ -1005,6 +1005,9 @@ static int dtSplitUp(tid_t tid,
1005 1005
1006 DT_PUTPAGE(smp); 1006 DT_PUTPAGE(smp);
1007 1007
1008 if (!DO_INDEX(ip))
1009 ip->i_size = xlen << sbi->l2bsize;
1010
1008 goto freeKeyName; 1011 goto freeKeyName;
1009 } 1012 }
1010 1013
@@ -1055,7 +1058,9 @@ static int dtSplitUp(tid_t tid,
1055 xaddr = addressPXD(pxd) + xlen; 1058 xaddr = addressPXD(pxd) + xlen;
1056 dbFree(ip, xaddr, (s64) n); 1059 dbFree(ip, xaddr, (s64) n);
1057 } 1060 }
1058 } 1061 } else if (!DO_INDEX(ip))
1062 ip->i_size = lengthPXD(pxd) << sbi->l2bsize;
1063
1059 1064
1060 extendOut: 1065 extendOut:
1061 DT_PUTPAGE(smp); 1066 DT_PUTPAGE(smp);
@@ -1098,6 +1103,9 @@ static int dtSplitUp(tid_t tid,
1098 goto splitOut; 1103 goto splitOut;
1099 } 1104 }
1100 1105
1106 if (!DO_INDEX(ip))
1107 ip->i_size += PSIZE;
1108
1101 /* 1109 /*
1102 * propagate up the router entry for the leaf page just split 1110 * propagate up the router entry for the leaf page just split
1103 * 1111 *
@@ -2424,6 +2432,9 @@ static int dtDeleteUp(tid_t tid, struct inode *ip,
2424 break; 2432 break;
2425 } 2433 }
2426 2434
2435 if (!DO_INDEX(ip))
2436 ip->i_size -= PSIZE;
2437
2427 return 0; 2438 return 0;
2428} 2439}
2429 2440
diff --git a/fs/jfs/jfs_extent.c b/fs/jfs/jfs_extent.c
index 4879603daa1c..5549378358bf 100644
--- a/fs/jfs/jfs_extent.c
+++ b/fs/jfs/jfs_extent.c
@@ -94,7 +94,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, boolean_t abnr)
94 txBeginAnon(ip->i_sb); 94 txBeginAnon(ip->i_sb);
95 95
96 /* Avoid race with jfs_commit_inode() */ 96 /* Avoid race with jfs_commit_inode() */
97 down(&JFS_IP(ip)->commit_sem); 97 mutex_lock(&JFS_IP(ip)->commit_mutex);
98 98
99 /* validate extent length */ 99 /* validate extent length */
100 if (xlen > MAXXLEN) 100 if (xlen > MAXXLEN)
@@ -136,14 +136,14 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, boolean_t abnr)
136 */ 136 */
137 nxlen = xlen; 137 nxlen = xlen;
138 if ((rc = extBalloc(ip, hint ? hint : INOHINT(ip), &nxlen, &nxaddr))) { 138 if ((rc = extBalloc(ip, hint ? hint : INOHINT(ip), &nxlen, &nxaddr))) {
139 up(&JFS_IP(ip)->commit_sem); 139 mutex_unlock(&JFS_IP(ip)->commit_mutex);
140 return (rc); 140 return (rc);
141 } 141 }
142 142
143 /* Allocate blocks to quota. */ 143 /* Allocate blocks to quota. */
144 if (DQUOT_ALLOC_BLOCK(ip, nxlen)) { 144 if (DQUOT_ALLOC_BLOCK(ip, nxlen)) {
145 dbFree(ip, nxaddr, (s64) nxlen); 145 dbFree(ip, nxaddr, (s64) nxlen);
146 up(&JFS_IP(ip)->commit_sem); 146 mutex_unlock(&JFS_IP(ip)->commit_mutex);
147 return -EDQUOT; 147 return -EDQUOT;
148 } 148 }
149 149
@@ -165,7 +165,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, boolean_t abnr)
165 if (rc) { 165 if (rc) {
166 dbFree(ip, nxaddr, nxlen); 166 dbFree(ip, nxaddr, nxlen);
167 DQUOT_FREE_BLOCK(ip, nxlen); 167 DQUOT_FREE_BLOCK(ip, nxlen);
168 up(&JFS_IP(ip)->commit_sem); 168 mutex_unlock(&JFS_IP(ip)->commit_mutex);
169 return (rc); 169 return (rc);
170 } 170 }
171 171
@@ -177,7 +177,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, boolean_t abnr)
177 177
178 mark_inode_dirty(ip); 178 mark_inode_dirty(ip);
179 179
180 up(&JFS_IP(ip)->commit_sem); 180 mutex_unlock(&JFS_IP(ip)->commit_mutex);
181 /* 181 /*
182 * COMMIT_SyncList flags an anonymous tlock on page that is on 182 * COMMIT_SyncList flags an anonymous tlock on page that is on
183 * sync list. 183 * sync list.
@@ -222,7 +222,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, boolean_t abnr)
222 /* This blocks if we are low on resources */ 222 /* This blocks if we are low on resources */
223 txBeginAnon(ip->i_sb); 223 txBeginAnon(ip->i_sb);
224 224
225 down(&JFS_IP(ip)->commit_sem); 225 mutex_lock(&JFS_IP(ip)->commit_mutex);
226 /* validate extent length */ 226 /* validate extent length */
227 if (nxlen > MAXXLEN) 227 if (nxlen > MAXXLEN)
228 nxlen = MAXXLEN; 228 nxlen = MAXXLEN;
@@ -258,7 +258,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, boolean_t abnr)
258 /* Allocate blocks to quota. */ 258 /* Allocate blocks to quota. */
259 if (DQUOT_ALLOC_BLOCK(ip, nxlen)) { 259 if (DQUOT_ALLOC_BLOCK(ip, nxlen)) {
260 dbFree(ip, nxaddr, (s64) nxlen); 260 dbFree(ip, nxaddr, (s64) nxlen);
261 up(&JFS_IP(ip)->commit_sem); 261 mutex_unlock(&JFS_IP(ip)->commit_mutex);
262 return -EDQUOT; 262 return -EDQUOT;
263 } 263 }
264 264
@@ -338,7 +338,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, boolean_t abnr)
338 338
339 mark_inode_dirty(ip); 339 mark_inode_dirty(ip);
340exit: 340exit:
341 up(&JFS_IP(ip)->commit_sem); 341 mutex_unlock(&JFS_IP(ip)->commit_mutex);
342 return (rc); 342 return (rc);
343} 343}
344#endif /* _NOTYET */ 344#endif /* _NOTYET */
@@ -439,12 +439,12 @@ int extRecord(struct inode *ip, xad_t * xp)
439 439
440 txBeginAnon(ip->i_sb); 440 txBeginAnon(ip->i_sb);
441 441
442 down(&JFS_IP(ip)->commit_sem); 442 mutex_lock(&JFS_IP(ip)->commit_mutex);
443 443
444 /* update the extent */ 444 /* update the extent */
445 rc = xtUpdate(0, ip, xp); 445 rc = xtUpdate(0, ip, xp);
446 446
447 up(&JFS_IP(ip)->commit_sem); 447 mutex_unlock(&JFS_IP(ip)->commit_mutex);
448 return rc; 448 return rc;
449} 449}
450 450
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index 4efa0d0eec39..ccbe60aff83d 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -66,14 +66,14 @@ static HLIST_HEAD(aggregate_hash);
66 * imap locks 66 * imap locks
67 */ 67 */
68/* iag free list lock */ 68/* iag free list lock */
69#define IAGFREE_LOCK_INIT(imap) init_MUTEX(&imap->im_freelock) 69#define IAGFREE_LOCK_INIT(imap) mutex_init(&imap->im_freelock)
70#define IAGFREE_LOCK(imap) down(&imap->im_freelock) 70#define IAGFREE_LOCK(imap) mutex_lock(&imap->im_freelock)
71#define IAGFREE_UNLOCK(imap) up(&imap->im_freelock) 71#define IAGFREE_UNLOCK(imap) mutex_unlock(&imap->im_freelock)
72 72
73/* per ag iag list locks */ 73/* per ag iag list locks */
74#define AG_LOCK_INIT(imap,index) init_MUTEX(&(imap->im_aglock[index])) 74#define AG_LOCK_INIT(imap,index) mutex_init(&(imap->im_aglock[index]))
75#define AG_LOCK(imap,agno) down(&imap->im_aglock[agno]) 75#define AG_LOCK(imap,agno) mutex_lock(&imap->im_aglock[agno])
76#define AG_UNLOCK(imap,agno) up(&imap->im_aglock[agno]) 76#define AG_UNLOCK(imap,agno) mutex_unlock(&imap->im_aglock[agno])
77 77
78/* 78/*
79 * forward references 79 * forward references
@@ -1261,7 +1261,7 @@ int diFree(struct inode *ip)
1261 * to be freed by the transaction; 1261 * to be freed by the transaction;
1262 */ 1262 */
1263 tid = txBegin(ipimap->i_sb, COMMIT_FORCE); 1263 tid = txBegin(ipimap->i_sb, COMMIT_FORCE);
1264 down(&JFS_IP(ipimap)->commit_sem); 1264 mutex_lock(&JFS_IP(ipimap)->commit_mutex);
1265 1265
1266 /* acquire tlock of the iag page of the freed ixad 1266 /* acquire tlock of the iag page of the freed ixad
1267 * to force the page NOHOMEOK (even though no data is 1267 * to force the page NOHOMEOK (even though no data is
@@ -1294,7 +1294,7 @@ int diFree(struct inode *ip)
1294 rc = txCommit(tid, 1, &iplist[0], COMMIT_FORCE); 1294 rc = txCommit(tid, 1, &iplist[0], COMMIT_FORCE);
1295 1295
1296 txEnd(tid); 1296 txEnd(tid);
1297 up(&JFS_IP(ipimap)->commit_sem); 1297 mutex_unlock(&JFS_IP(ipimap)->commit_mutex);
1298 1298
1299 /* unlock the AG inode map information */ 1299 /* unlock the AG inode map information */
1300 AG_UNLOCK(imap, agno); 1300 AG_UNLOCK(imap, agno);
@@ -2554,13 +2554,13 @@ diNewIAG(struct inomap * imap, int *iagnop, int agno, struct metapage ** mpp)
2554 * addressing structure pointing to the new iag page; 2554 * addressing structure pointing to the new iag page;
2555 */ 2555 */
2556 tid = txBegin(sb, COMMIT_FORCE); 2556 tid = txBegin(sb, COMMIT_FORCE);
2557 down(&JFS_IP(ipimap)->commit_sem); 2557 mutex_lock(&JFS_IP(ipimap)->commit_mutex);
2558 2558
2559 /* update the inode map addressing structure to point to it */ 2559 /* update the inode map addressing structure to point to it */
2560 if ((rc = 2560 if ((rc =
2561 xtInsert(tid, ipimap, 0, blkno, xlen, &xaddr, 0))) { 2561 xtInsert(tid, ipimap, 0, blkno, xlen, &xaddr, 0))) {
2562 txEnd(tid); 2562 txEnd(tid);
2563 up(&JFS_IP(ipimap)->commit_sem); 2563 mutex_unlock(&JFS_IP(ipimap)->commit_mutex);
2564 /* Free the blocks allocated for the iag since it was 2564 /* Free the blocks allocated for the iag since it was
2565 * not successfully added to the inode map 2565 * not successfully added to the inode map
2566 */ 2566 */
@@ -2626,7 +2626,7 @@ diNewIAG(struct inomap * imap, int *iagnop, int agno, struct metapage ** mpp)
2626 rc = txCommit(tid, 1, &iplist[0], COMMIT_FORCE); 2626 rc = txCommit(tid, 1, &iplist[0], COMMIT_FORCE);
2627 2627
2628 txEnd(tid); 2628 txEnd(tid);
2629 up(&JFS_IP(ipimap)->commit_sem); 2629 mutex_unlock(&JFS_IP(ipimap)->commit_mutex);
2630 2630
2631 duplicateIXtree(sb, blkno, xlen, &xaddr); 2631 duplicateIXtree(sb, blkno, xlen, &xaddr);
2632 2632
@@ -3074,14 +3074,40 @@ static void duplicateIXtree(struct super_block *sb, s64 blkno,
3074static int copy_from_dinode(struct dinode * dip, struct inode *ip) 3074static int copy_from_dinode(struct dinode * dip, struct inode *ip)
3075{ 3075{
3076 struct jfs_inode_info *jfs_ip = JFS_IP(ip); 3076 struct jfs_inode_info *jfs_ip = JFS_IP(ip);
3077 struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
3077 3078
3078 jfs_ip->fileset = le32_to_cpu(dip->di_fileset); 3079 jfs_ip->fileset = le32_to_cpu(dip->di_fileset);
3079 jfs_ip->mode2 = le32_to_cpu(dip->di_mode); 3080 jfs_ip->mode2 = le32_to_cpu(dip->di_mode);
3080 3081
3081 ip->i_mode = le32_to_cpu(dip->di_mode) & 0xffff; 3082 ip->i_mode = le32_to_cpu(dip->di_mode) & 0xffff;
3083 if (sbi->umask != -1) {
3084 ip->i_mode = (ip->i_mode & ~0777) | (0777 & ~sbi->umask);
3085 /* For directories, add x permission if r is allowed by umask */
3086 if (S_ISDIR(ip->i_mode)) {
3087 if (ip->i_mode & 0400)
3088 ip->i_mode |= 0100;
3089 if (ip->i_mode & 0040)
3090 ip->i_mode |= 0010;
3091 if (ip->i_mode & 0004)
3092 ip->i_mode |= 0001;
3093 }
3094 }
3082 ip->i_nlink = le32_to_cpu(dip->di_nlink); 3095 ip->i_nlink = le32_to_cpu(dip->di_nlink);
3083 ip->i_uid = le32_to_cpu(dip->di_uid); 3096
3084 ip->i_gid = le32_to_cpu(dip->di_gid); 3097 jfs_ip->saved_uid = le32_to_cpu(dip->di_uid);
3098 if (sbi->uid == -1)
3099 ip->i_uid = jfs_ip->saved_uid;
3100 else {
3101 ip->i_uid = sbi->uid;
3102 }
3103
3104 jfs_ip->saved_gid = le32_to_cpu(dip->di_gid);
3105 if (sbi->gid == -1)
3106 ip->i_gid = jfs_ip->saved_gid;
3107 else {
3108 ip->i_gid = sbi->gid;
3109 }
3110
3085 ip->i_size = le64_to_cpu(dip->di_size); 3111 ip->i_size = le64_to_cpu(dip->di_size);
3086 ip->i_atime.tv_sec = le32_to_cpu(dip->di_atime.tv_sec); 3112 ip->i_atime.tv_sec = le32_to_cpu(dip->di_atime.tv_sec);
3087 ip->i_atime.tv_nsec = le32_to_cpu(dip->di_atime.tv_nsec); 3113 ip->i_atime.tv_nsec = le32_to_cpu(dip->di_atime.tv_nsec);
@@ -3132,21 +3158,33 @@ static int copy_from_dinode(struct dinode * dip, struct inode *ip)
3132static void copy_to_dinode(struct dinode * dip, struct inode *ip) 3158static void copy_to_dinode(struct dinode * dip, struct inode *ip)
3133{ 3159{
3134 struct jfs_inode_info *jfs_ip = JFS_IP(ip); 3160 struct jfs_inode_info *jfs_ip = JFS_IP(ip);
3161 struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
3135 3162
3136 dip->di_fileset = cpu_to_le32(jfs_ip->fileset); 3163 dip->di_fileset = cpu_to_le32(jfs_ip->fileset);
3137 dip->di_inostamp = cpu_to_le32(JFS_SBI(ip->i_sb)->inostamp); 3164 dip->di_inostamp = cpu_to_le32(sbi->inostamp);
3138 dip->di_number = cpu_to_le32(ip->i_ino); 3165 dip->di_number = cpu_to_le32(ip->i_ino);
3139 dip->di_gen = cpu_to_le32(ip->i_generation); 3166 dip->di_gen = cpu_to_le32(ip->i_generation);
3140 dip->di_size = cpu_to_le64(ip->i_size); 3167 dip->di_size = cpu_to_le64(ip->i_size);
3141 dip->di_nblocks = cpu_to_le64(PBLK2LBLK(ip->i_sb, ip->i_blocks)); 3168 dip->di_nblocks = cpu_to_le64(PBLK2LBLK(ip->i_sb, ip->i_blocks));
3142 dip->di_nlink = cpu_to_le32(ip->i_nlink); 3169 dip->di_nlink = cpu_to_le32(ip->i_nlink);
3143 dip->di_uid = cpu_to_le32(ip->i_uid); 3170 if (sbi->uid == -1)
3144 dip->di_gid = cpu_to_le32(ip->i_gid); 3171 dip->di_uid = cpu_to_le32(ip->i_uid);
3172 else
3173 dip->di_uid = cpu_to_le32(jfs_ip->saved_uid);
3174 if (sbi->gid == -1)
3175 dip->di_gid = cpu_to_le32(ip->i_gid);
3176 else
3177 dip->di_gid = cpu_to_le32(jfs_ip->saved_gid);
3145 /* 3178 /*
3146 * mode2 is only needed for storing the higher order bits. 3179 * mode2 is only needed for storing the higher order bits.
3147 * Trust i_mode for the lower order ones 3180 * Trust i_mode for the lower order ones
3148 */ 3181 */
3149 dip->di_mode = cpu_to_le32((jfs_ip->mode2 & 0xffff0000) | ip->i_mode); 3182 if (sbi->umask == -1)
3183 dip->di_mode = cpu_to_le32((jfs_ip->mode2 & 0xffff0000) |
3184 ip->i_mode);
3185 else /* Leave the original permissions alone */
3186 dip->di_mode = cpu_to_le32(jfs_ip->mode2);
3187
3150 dip->di_atime.tv_sec = cpu_to_le32(ip->i_atime.tv_sec); 3188 dip->di_atime.tv_sec = cpu_to_le32(ip->i_atime.tv_sec);
3151 dip->di_atime.tv_nsec = cpu_to_le32(ip->i_atime.tv_nsec); 3189 dip->di_atime.tv_nsec = cpu_to_le32(ip->i_atime.tv_nsec);
3152 dip->di_ctime.tv_sec = cpu_to_le32(ip->i_ctime.tv_sec); 3190 dip->di_ctime.tv_sec = cpu_to_le32(ip->i_ctime.tv_sec);
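
The net effect of the sbi->uid/gid/umask overrides: when the corresponding mount options are given (the option parsing lives in the super.c side of this change, not shown here, so the exact names are presumed to be uid=, gid= and umask=), every inode is presented with the forced owner/group and with permissions 0777 & ~umask, while the on-disk values survive via saved_uid/saved_gid and the untouched mode2 written back by copy_to_dinode(). For example, with umask=0133 files appear as 0644 and, because the directory fix-up adds an x bit wherever the matching r bit survived, directories appear as 0755.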
diff --git a/fs/jfs/jfs_imap.h b/fs/jfs/jfs_imap.h
index 6b59adec036a..6e24465f0f98 100644
--- a/fs/jfs/jfs_imap.h
+++ b/fs/jfs/jfs_imap.h
@@ -140,8 +140,8 @@ struct dinomap {
140struct inomap { 140struct inomap {
141 struct dinomap im_imap; /* 4096: inode allocation control */ 141 struct dinomap im_imap; /* 4096: inode allocation control */
142 struct inode *im_ipimap; /* 4: ptr to inode for imap */ 142 struct inode *im_ipimap; /* 4: ptr to inode for imap */
143 struct semaphore im_freelock; /* 4: iag free list lock */ 143 struct mutex im_freelock; /* 4: iag free list lock */
144 struct semaphore im_aglock[MAXAG]; /* 512: per AG locks */ 144 struct mutex im_aglock[MAXAG]; /* 512: per AG locks */
145 u32 *im_DBGdimap; 145 u32 *im_DBGdimap;
146 atomic_t im_numinos; /* num of backed inodes */ 146 atomic_t im_numinos; /* num of backed inodes */
147 atomic_t im_numfree; /* num of free backed inodes */ 147 atomic_t im_numfree; /* num of free backed inodes */
diff --git a/fs/jfs/jfs_incore.h b/fs/jfs/jfs_incore.h
index dc21a5bd54d4..54d73716ca8c 100644
--- a/fs/jfs/jfs_incore.h
+++ b/fs/jfs/jfs_incore.h
@@ -19,6 +19,7 @@
19#ifndef _H_JFS_INCORE 19#ifndef _H_JFS_INCORE
20#define _H_JFS_INCORE 20#define _H_JFS_INCORE
21 21
22#include <linux/mutex.h>
22#include <linux/rwsem.h> 23#include <linux/rwsem.h>
23#include <linux/slab.h> 24#include <linux/slab.h>
24#include <linux/bitops.h> 25#include <linux/bitops.h>
@@ -37,6 +38,8 @@
37struct jfs_inode_info { 38struct jfs_inode_info {
38 int fileset; /* fileset number (always 16)*/ 39 int fileset; /* fileset number (always 16)*/
39 uint mode2; /* jfs-specific mode */ 40 uint mode2; /* jfs-specific mode */
41 uint saved_uid; /* saved for uid mount option */
42 uint saved_gid; /* saved for gid mount option */
40 pxd_t ixpxd; /* inode extent descriptor */ 43 pxd_t ixpxd; /* inode extent descriptor */
41 dxd_t acl; /* dxd describing acl */ 44 dxd_t acl; /* dxd describing acl */
42 dxd_t ea; /* dxd describing ea */ 45 dxd_t ea; /* dxd describing ea */
@@ -62,12 +65,12 @@ struct jfs_inode_info {
62 */ 65 */
63 struct rw_semaphore rdwrlock; 66 struct rw_semaphore rdwrlock;
64 /* 67 /*
65 * commit_sem serializes transaction processing on an inode. 68 * commit_mutex serializes transaction processing on an inode.
66 * It must be taken after beginning a transaction (txBegin), since 69 * It must be taken after beginning a transaction (txBegin), since
67 * dirty inodes may be committed while a new transaction on the 70 * dirty inodes may be committed while a new transaction on the
68 * inode is blocked in txBegin or TxBeginAnon 71 * inode is blocked in txBegin or TxBeginAnon
69 */ 72 */
70 struct semaphore commit_sem; 73 struct mutex commit_mutex;
71 /* xattr_sem allows us to access the xattrs without taking i_mutex */ 74 /* xattr_sem allows us to access the xattrs without taking i_mutex */
72 struct rw_semaphore xattr_sem; 75 struct rw_semaphore xattr_sem;
73 lid_t xtlid; /* lid of xtree lock on directory */ 76 lid_t xtlid; /* lid of xtree lock on directory */
@@ -169,6 +172,9 @@ struct jfs_sb_info {
169 uint state; /* mount/recovery state */ 172 uint state; /* mount/recovery state */
170 unsigned long flag; /* mount time flags */ 173 unsigned long flag; /* mount time flags */
171 uint p_state; /* state prior to going no integrity */ 174 uint p_state; /* state prior to going no integrity */
175 uint uid; /* uid to override on-disk uid */
176 uint gid; /* gid to override on-disk gid */
177 uint umask; /* umask to override on-disk umask */
172}; 178};
173 179
174/* jfs_sb_info commit_state */ 180/* jfs_sb_info commit_state */
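
The commit_mutex comment above states the ordering rule that every conversion in this patch follows; distilled from the jfs_commit_inode() and extAlloc() hunks earlier (a schematic sketch, not new code):

	tid = txBegin(inode->i_sb, 0);			/* may block; the inode can be committed meanwhile */
	mutex_lock(&JFS_IP(inode)->commit_mutex);	/* taken only after txBegin, never before */

	/* ... build and log the transaction against the inode ... */

	rc = txCommit(tid, 1, &inode, 0);
	txEnd(tid);
	mutex_unlock(&JFS_IP(inode)->commit_mutex);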
diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c
index 2af5efbfd06f..495df402916d 100644
--- a/fs/jfs/jfs_inode.c
+++ b/fs/jfs/jfs_inode.c
@@ -25,6 +25,26 @@
25#include "jfs_dinode.h" 25#include "jfs_dinode.h"
26#include "jfs_debug.h" 26#include "jfs_debug.h"
27 27
28
29void jfs_set_inode_flags(struct inode *inode)
30{
31 unsigned int flags = JFS_IP(inode)->mode2;
32
33 inode->i_flags &= ~(S_IMMUTABLE | S_APPEND |
34 S_NOATIME | S_DIRSYNC | S_SYNC);
35
36 if (flags & JFS_IMMUTABLE_FL)
37 inode->i_flags |= S_IMMUTABLE;
38 if (flags & JFS_APPEND_FL)
39 inode->i_flags |= S_APPEND;
40 if (flags & JFS_NOATIME_FL)
41 inode->i_flags |= S_NOATIME;
42 if (flags & JFS_DIRSYNC_FL)
43 inode->i_flags |= S_DIRSYNC;
44 if (flags & JFS_SYNC_FL)
45 inode->i_flags |= S_SYNC;
46}
47
28/* 48/*
29 * NAME: ialloc() 49 * NAME: ialloc()
30 * 50 *
@@ -63,6 +83,13 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
63 inode->i_gid = current->fsgid; 83 inode->i_gid = current->fsgid;
64 84
65 /* 85 /*
86 * New inodes need to save sane values on disk when
87 * uid & gid mount options are used
88 */
89 jfs_inode->saved_uid = inode->i_uid;
90 jfs_inode->saved_gid = inode->i_gid;
91
92 /*
66 * Allocate inode to quota. 93 * Allocate inode to quota.
67 */ 94 */
68 if (DQUOT_ALLOC_INODE(inode)) { 95 if (DQUOT_ALLOC_INODE(inode)) {
@@ -74,10 +101,20 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
74 } 101 }
75 102
76 inode->i_mode = mode; 103 inode->i_mode = mode;
77 if (S_ISDIR(mode)) 104 /* inherit flags from parent */
78 jfs_inode->mode2 = IDIRECTORY | mode; 105 jfs_inode->mode2 = JFS_IP(parent)->mode2 & JFS_FL_INHERIT;
79 else 106
80 jfs_inode->mode2 = INLINEEA | ISPARSE | mode; 107 if (S_ISDIR(mode)) {
108 jfs_inode->mode2 |= IDIRECTORY;
109 jfs_inode->mode2 &= ~JFS_DIRSYNC_FL;
110 }
111 else {
112 jfs_inode->mode2 |= INLINEEA | ISPARSE;
113 if (S_ISLNK(mode))
114 jfs_inode->mode2 &= ~(JFS_IMMUTABLE_FL|JFS_APPEND_FL);
115 }
116 jfs_inode->mode2 |= mode;
117
81 inode->i_blksize = sb->s_blocksize; 118 inode->i_blksize = sb->s_blocksize;
82 inode->i_blocks = 0; 119 inode->i_blocks = 0;
83 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; 120 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
@@ -98,6 +135,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
98 jfs_inode->atlhead = 0; 135 jfs_inode->atlhead = 0;
99 jfs_inode->atltail = 0; 136 jfs_inode->atltail = 0;
100 jfs_inode->xtlid = 0; 137 jfs_inode->xtlid = 0;
138 jfs_set_inode_flags(inode);
101 139
102 jfs_info("ialloc returns inode = 0x%p\n", inode); 140 jfs_info("ialloc returns inode = 0x%p\n", inode);
103 141
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h
index b54bac576cb3..095d471b9f9a 100644
--- a/fs/jfs/jfs_inode.h
+++ b/fs/jfs/jfs_inode.h
@@ -20,6 +20,8 @@
20 20
21extern struct inode *ialloc(struct inode *, umode_t); 21extern struct inode *ialloc(struct inode *, umode_t);
22extern int jfs_fsync(struct file *, struct dentry *, int); 22extern int jfs_fsync(struct file *, struct dentry *, int);
23extern int jfs_ioctl(struct inode *, struct file *,
24 unsigned int, unsigned long);
23extern void jfs_read_inode(struct inode *); 25extern void jfs_read_inode(struct inode *);
24extern int jfs_commit_inode(struct inode *, int); 26extern int jfs_commit_inode(struct inode *, int);
25extern int jfs_write_inode(struct inode*, int); 27extern int jfs_write_inode(struct inode*, int);
@@ -29,6 +31,7 @@ extern void jfs_truncate(struct inode *);
29extern void jfs_truncate_nolock(struct inode *, loff_t); 31extern void jfs_truncate_nolock(struct inode *, loff_t);
30extern void jfs_free_zero_link(struct inode *); 32extern void jfs_free_zero_link(struct inode *);
31extern struct dentry *jfs_get_parent(struct dentry *dentry); 33extern struct dentry *jfs_get_parent(struct dentry *dentry);
34extern void jfs_set_inode_flags(struct inode *);
32 35
33extern struct address_space_operations jfs_aops; 36extern struct address_space_operations jfs_aops;
34extern struct inode_operations jfs_dir_inode_operations; 37extern struct inode_operations jfs_dir_inode_operations;
diff --git a/fs/jfs/jfs_lock.h b/fs/jfs/jfs_lock.h
index 10ad1d086685..70ac9f7d1e00 100644
--- a/fs/jfs/jfs_lock.h
+++ b/fs/jfs/jfs_lock.h
@@ -20,6 +20,7 @@
20#define _H_JFS_LOCK 20#define _H_JFS_LOCK
21 21
22#include <linux/spinlock.h> 22#include <linux/spinlock.h>
23#include <linux/mutex.h>
23#include <linux/sched.h> 24#include <linux/sched.h>
24 25
25/* 26/*
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index d27bac6acaa3..0b348b13b551 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -64,6 +64,7 @@
64#include <linux/interrupt.h> 64#include <linux/interrupt.h>
65#include <linux/smp_lock.h> 65#include <linux/smp_lock.h>
66#include <linux/completion.h> 66#include <linux/completion.h>
67#include <linux/kthread.h>
67#include <linux/buffer_head.h> /* for sync_blockdev() */ 68#include <linux/buffer_head.h> /* for sync_blockdev() */
68#include <linux/bio.h> 69#include <linux/bio.h>
69#include <linux/suspend.h> 70#include <linux/suspend.h>
@@ -81,15 +82,14 @@
81 */ 82 */
82static struct lbuf *log_redrive_list; 83static struct lbuf *log_redrive_list;
83static DEFINE_SPINLOCK(log_redrive_lock); 84static DEFINE_SPINLOCK(log_redrive_lock);
84DECLARE_WAIT_QUEUE_HEAD(jfs_IO_thread_wait);
85 85
86 86
87/* 87/*
88 * log read/write serialization (per log) 88 * log read/write serialization (per log)
89 */ 89 */
90#define LOG_LOCK_INIT(log) init_MUTEX(&(log)->loglock) 90#define LOG_LOCK_INIT(log) mutex_init(&(log)->loglock)
91#define LOG_LOCK(log) down(&((log)->loglock)) 91#define LOG_LOCK(log) mutex_lock(&((log)->loglock))
92#define LOG_UNLOCK(log) up(&((log)->loglock)) 92#define LOG_UNLOCK(log) mutex_unlock(&((log)->loglock))
93 93
94 94
95/* 95/*
@@ -1105,11 +1105,10 @@ int lmLogOpen(struct super_block *sb)
1105 } 1105 }
1106 } 1106 }
1107 1107
1108 if (!(log = kmalloc(sizeof(struct jfs_log), GFP_KERNEL))) { 1108 if (!(log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL))) {
1109 up(&jfs_log_sem); 1109 up(&jfs_log_sem);
1110 return -ENOMEM; 1110 return -ENOMEM;
1111 } 1111 }
1112 memset(log, 0, sizeof(struct jfs_log));
1113 INIT_LIST_HEAD(&log->sb_list); 1112 INIT_LIST_HEAD(&log->sb_list);
1114 init_waitqueue_head(&log->syncwait); 1113 init_waitqueue_head(&log->syncwait);
1115 1114
@@ -1181,9 +1180,8 @@ static int open_inline_log(struct super_block *sb)
1181 struct jfs_log *log; 1180 struct jfs_log *log;
1182 int rc; 1181 int rc;
1183 1182
1184 if (!(log = kmalloc(sizeof(struct jfs_log), GFP_KERNEL))) 1183 if (!(log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL)))
1185 return -ENOMEM; 1184 return -ENOMEM;
1186 memset(log, 0, sizeof(struct jfs_log));
1187 INIT_LIST_HEAD(&log->sb_list); 1185 INIT_LIST_HEAD(&log->sb_list);
1188 init_waitqueue_head(&log->syncwait); 1186 init_waitqueue_head(&log->syncwait);
1189 1187
@@ -1216,12 +1214,11 @@ static int open_dummy_log(struct super_block *sb)
1216 1214
1217 down(&jfs_log_sem); 1215 down(&jfs_log_sem);
1218 if (!dummy_log) { 1216 if (!dummy_log) {
1219 dummy_log = kmalloc(sizeof(struct jfs_log), GFP_KERNEL); 1217 dummy_log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL);
1220 if (!dummy_log) { 1218 if (!dummy_log) {
1221 up(&jfs_log_sem); 1219 up(&jfs_log_sem);
1222 return -ENOMEM; 1220 return -ENOMEM;
1223 } 1221 }
1224 memset(dummy_log, 0, sizeof(struct jfs_log));
1225 INIT_LIST_HEAD(&dummy_log->sb_list); 1222 INIT_LIST_HEAD(&dummy_log->sb_list);
1226 init_waitqueue_head(&dummy_log->syncwait); 1223 init_waitqueue_head(&dummy_log->syncwait);
1227 dummy_log->no_integrity = 1; 1224 dummy_log->no_integrity = 1;
@@ -1980,7 +1977,7 @@ static inline void lbmRedrive(struct lbuf *bp)
1980 log_redrive_list = bp; 1977 log_redrive_list = bp;
1981 spin_unlock_irqrestore(&log_redrive_lock, flags); 1978 spin_unlock_irqrestore(&log_redrive_lock, flags);
1982 1979
1983 wake_up(&jfs_IO_thread_wait); 1980 wake_up_process(jfsIOthread);
1984} 1981}
1985 1982
1986 1983
@@ -2347,13 +2344,7 @@ int jfsIOWait(void *arg)
2347{ 2344{
2348 struct lbuf *bp; 2345 struct lbuf *bp;
2349 2346
2350 daemonize("jfsIO");
2351
2352 complete(&jfsIOwait);
2353
2354 do { 2347 do {
2355 DECLARE_WAITQUEUE(wq, current);
2356
2357 spin_lock_irq(&log_redrive_lock); 2348 spin_lock_irq(&log_redrive_lock);
2358 while ((bp = log_redrive_list) != 0) { 2349 while ((bp = log_redrive_list) != 0) {
2359 log_redrive_list = bp->l_redrive_next; 2350 log_redrive_list = bp->l_redrive_next;
@@ -2362,21 +2353,19 @@ int jfsIOWait(void *arg)
2362 lbmStartIO(bp); 2353 lbmStartIO(bp);
2363 spin_lock_irq(&log_redrive_lock); 2354 spin_lock_irq(&log_redrive_lock);
2364 } 2355 }
2356 spin_unlock_irq(&log_redrive_lock);
2357
2365 if (freezing(current)) { 2358 if (freezing(current)) {
2366 spin_unlock_irq(&log_redrive_lock);
2367 refrigerator(); 2359 refrigerator();
2368 } else { 2360 } else {
2369 add_wait_queue(&jfs_IO_thread_wait, &wq);
2370 set_current_state(TASK_INTERRUPTIBLE); 2361 set_current_state(TASK_INTERRUPTIBLE);
2371 spin_unlock_irq(&log_redrive_lock);
2372 schedule(); 2362 schedule();
2373 current->state = TASK_RUNNING; 2363 current->state = TASK_RUNNING;
2374 remove_wait_queue(&jfs_IO_thread_wait, &wq);
2375 } 2364 }
2376 } while (!jfs_stop_threads); 2365 } while (!kthread_should_stop());
2377 2366
2378 jfs_info("jfsIOWait being killed!"); 2367 jfs_info("jfsIOWait being killed!");
2379 complete_and_exit(&jfsIOwait, 0); 2368 return 0;
2380} 2369}
2381 2370
2382/* 2371/*
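
jfsIOWait() above is one instance of the recurring conversion in this patch from daemonize() plus a private wait queue to the kthread API: the loop now exits on kthread_should_stop(), and producers such as lbmRedrive() call wake_up_process() on the task_struct instead of wake_up() on jfs_IO_thread_wait. The creation and teardown side lives outside these hunks (presumably in the jfs module init/exit paths) and would look roughly like:

	/* start-up (assumed location; the "jfsIO" name matches the old daemonize() call) */
	jfsIOthread = kthread_run(jfsIOWait, NULL, "jfsIO");
	if (IS_ERR(jfsIOthread))
		return PTR_ERR(jfsIOthread);

	/* shutdown: kthread_stop() wakes the thread so kthread_should_stop() returns true */
	kthread_stop(jfsIOthread);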
diff --git a/fs/jfs/jfs_logmgr.h b/fs/jfs/jfs_logmgr.h
index e4978b5b65ee..8c6909b80014 100644
--- a/fs/jfs/jfs_logmgr.h
+++ b/fs/jfs/jfs_logmgr.h
@@ -389,7 +389,7 @@ struct jfs_log {
389 int eor; /* 4: eor of last record in eol page */ 389 int eor; /* 4: eor of last record in eol page */
390 struct lbuf *bp; /* 4: current log page buffer */ 390 struct lbuf *bp; /* 4: current log page buffer */
391 391
392 struct semaphore loglock; /* 4: log write serialization lock */ 392 struct mutex loglock; /* 4: log write serialization lock */
393 393
394 /* syncpt */ 394 /* syncpt */
395 int nextsync; /* 4: bytes to write before next syncpt */ 395 int nextsync; /* 4: bytes to write before next syncpt */
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 8a53981f9f27..5fbaeaadccd3 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -104,10 +104,9 @@ static inline int insert_metapage(struct page *page, struct metapage *mp)
104 if (PagePrivate(page)) 104 if (PagePrivate(page))
105 a = mp_anchor(page); 105 a = mp_anchor(page);
106 else { 106 else {
107 a = kmalloc(sizeof(struct meta_anchor), GFP_NOFS); 107 a = kzalloc(sizeof(struct meta_anchor), GFP_NOFS);
108 if (!a) 108 if (!a)
109 return -ENOMEM; 109 return -ENOMEM;
110 memset(a, 0, sizeof(struct meta_anchor));
111 set_page_private(page, (unsigned long)a); 110 set_page_private(page, (unsigned long)a);
112 SetPagePrivate(page); 111 SetPagePrivate(page);
113 kmap(page); 112 kmap(page);
diff --git a/fs/jfs/jfs_superblock.h b/fs/jfs/jfs_superblock.h
index fcf781bf31cb..682cf1a68a18 100644
--- a/fs/jfs/jfs_superblock.h
+++ b/fs/jfs/jfs_superblock.h
@@ -113,12 +113,9 @@ extern int jfs_mount(struct super_block *);
113extern int jfs_mount_rw(struct super_block *, int); 113extern int jfs_mount_rw(struct super_block *, int);
114extern int jfs_umount(struct super_block *); 114extern int jfs_umount(struct super_block *);
115extern int jfs_umount_rw(struct super_block *); 115extern int jfs_umount_rw(struct super_block *);
116
117extern int jfs_stop_threads;
118extern struct completion jfsIOwait;
119extern wait_queue_head_t jfs_IO_thread_wait;
120extern wait_queue_head_t jfs_commit_thread_wait;
121extern wait_queue_head_t jfs_sync_thread_wait;
122extern int jfs_extendfs(struct super_block *, s64, int); 116extern int jfs_extendfs(struct super_block *, s64, int);
123 117
118extern struct task_struct *jfsIOthread;
119extern struct task_struct *jfsSyncThread;
120
124#endif /*_H_JFS_SUPERBLOCK */ 121#endif /*_H_JFS_SUPERBLOCK */
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index 2ddb6b892bcf..ac3d66948e8c 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -49,6 +49,7 @@
49#include <linux/suspend.h> 49#include <linux/suspend.h>
50#include <linux/module.h> 50#include <linux/module.h>
51#include <linux/moduleparam.h> 51#include <linux/moduleparam.h>
52#include <linux/kthread.h>
52#include "jfs_incore.h" 53#include "jfs_incore.h"
53#include "jfs_inode.h" 54#include "jfs_inode.h"
54#include "jfs_filsys.h" 55#include "jfs_filsys.h"
@@ -121,8 +122,7 @@ static DEFINE_SPINLOCK(jfsTxnLock);
121#define LAZY_LOCK(flags) spin_lock_irqsave(&TxAnchor.LazyLock, flags) 122#define LAZY_LOCK(flags) spin_lock_irqsave(&TxAnchor.LazyLock, flags)
122#define LAZY_UNLOCK(flags) spin_unlock_irqrestore(&TxAnchor.LazyLock, flags) 123#define LAZY_UNLOCK(flags) spin_unlock_irqrestore(&TxAnchor.LazyLock, flags)
123 124
124DECLARE_WAIT_QUEUE_HEAD(jfs_sync_thread_wait); 125static DECLARE_WAIT_QUEUE_HEAD(jfs_commit_thread_wait);
125DECLARE_WAIT_QUEUE_HEAD(jfs_commit_thread_wait);
126static int jfs_commit_thread_waking; 126static int jfs_commit_thread_waking;
127 127
128/* 128/*
@@ -207,7 +207,7 @@ static lid_t txLockAlloc(void)
207 if ((++TxAnchor.tlocksInUse > TxLockHWM) && (jfs_tlocks_low == 0)) { 207 if ((++TxAnchor.tlocksInUse > TxLockHWM) && (jfs_tlocks_low == 0)) {
208 jfs_info("txLockAlloc tlocks low"); 208 jfs_info("txLockAlloc tlocks low");
209 jfs_tlocks_low = 1; 209 jfs_tlocks_low = 1;
210 wake_up(&jfs_sync_thread_wait); 210 wake_up_process(jfsSyncThread);
211 } 211 }
212 212
213 return lid; 213 return lid;
@@ -2743,10 +2743,6 @@ int jfs_lazycommit(void *arg)
2743 unsigned long flags; 2743 unsigned long flags;
2744 struct jfs_sb_info *sbi; 2744 struct jfs_sb_info *sbi;
2745 2745
2746 daemonize("jfsCommit");
2747
2748 complete(&jfsIOwait);
2749
2750 do { 2746 do {
2751 LAZY_LOCK(flags); 2747 LAZY_LOCK(flags);
2752 jfs_commit_thread_waking = 0; /* OK to wake another thread */ 2748 jfs_commit_thread_waking = 0; /* OK to wake another thread */
@@ -2806,13 +2802,13 @@ int jfs_lazycommit(void *arg)
2806 current->state = TASK_RUNNING; 2802 current->state = TASK_RUNNING;
2807 remove_wait_queue(&jfs_commit_thread_wait, &wq); 2803 remove_wait_queue(&jfs_commit_thread_wait, &wq);
2808 } 2804 }
2809 } while (!jfs_stop_threads); 2805 } while (!kthread_should_stop());
2810 2806
2811 if (!list_empty(&TxAnchor.unlock_queue)) 2807 if (!list_empty(&TxAnchor.unlock_queue))
2812 jfs_err("jfs_lazycommit being killed w/pending transactions!"); 2808 jfs_err("jfs_lazycommit being killed w/pending transactions!");
2813 else 2809 else
2814 jfs_info("jfs_lazycommit being killed\n"); 2810 jfs_info("jfs_lazycommit being killed\n");
2815 complete_and_exit(&jfsIOwait, 0); 2811 return 0;
2816} 2812}
2817 2813
2818void txLazyUnlock(struct tblock * tblk) 2814void txLazyUnlock(struct tblock * tblk)
@@ -2876,10 +2872,10 @@ restart:
2876 */ 2872 */
2877 TXN_UNLOCK(); 2873 TXN_UNLOCK();
2878 tid = txBegin(ip->i_sb, COMMIT_INODE | COMMIT_FORCE); 2874 tid = txBegin(ip->i_sb, COMMIT_INODE | COMMIT_FORCE);
2879 down(&jfs_ip->commit_sem); 2875 mutex_lock(&jfs_ip->commit_mutex);
2880 txCommit(tid, 1, &ip, 0); 2876 txCommit(tid, 1, &ip, 0);
2881 txEnd(tid); 2877 txEnd(tid);
2882 up(&jfs_ip->commit_sem); 2878 mutex_unlock(&jfs_ip->commit_mutex);
2883 /* 2879 /*
2884 * Just to be safe. I don't know how 2880 * Just to be safe. I don't know how
2885 * long we can run without blocking 2881 * long we can run without blocking
@@ -2932,10 +2928,6 @@ int jfs_sync(void *arg)
2932 int rc; 2928 int rc;
2933 tid_t tid; 2929 tid_t tid;
2934 2930
2935 daemonize("jfsSync");
2936
2937 complete(&jfsIOwait);
2938
2939 do { 2931 do {
2940 /* 2932 /*
2941 * write each inode on the anonymous inode list 2933 * write each inode on the anonymous inode list
@@ -2952,7 +2944,7 @@ int jfs_sync(void *arg)
2952 * Inode is being freed 2944 * Inode is being freed
2953 */ 2945 */
2954 list_del_init(&jfs_ip->anon_inode_list); 2946 list_del_init(&jfs_ip->anon_inode_list);
2955 } else if (! down_trylock(&jfs_ip->commit_sem)) { 2947 } else if (! !mutex_trylock(&jfs_ip->commit_mutex)) {
2956 /* 2948 /*
2957 * inode will be removed from anonymous list 2949 * inode will be removed from anonymous list
2958 * when it is committed 2950 * when it is committed
@@ -2961,7 +2953,7 @@ int jfs_sync(void *arg)
2961 tid = txBegin(ip->i_sb, COMMIT_INODE); 2953 tid = txBegin(ip->i_sb, COMMIT_INODE);
2962 rc = txCommit(tid, 1, &ip, 0); 2954 rc = txCommit(tid, 1, &ip, 0);
2963 txEnd(tid); 2955 txEnd(tid);
2964 up(&jfs_ip->commit_sem); 2956 mutex_unlock(&jfs_ip->commit_mutex);
2965 2957
2966 iput(ip); 2958 iput(ip);
2967 /* 2959 /*
@@ -2971,7 +2963,7 @@ int jfs_sync(void *arg)
2971 cond_resched(); 2963 cond_resched();
2972 TXN_LOCK(); 2964 TXN_LOCK();
2973 } else { 2965 } else {
2974 /* We can't get the commit semaphore. It may 2966 /* We can't get the commit mutex. It may
2975 * be held by a thread waiting for tlock's 2967 * be held by a thread waiting for tlock's
2976 * so let's not block here. Save it to 2968 * so let's not block here. Save it to
2977 * put back on the anon_list. 2969 * put back on the anon_list.
@@ -2996,19 +2988,15 @@ int jfs_sync(void *arg)
2996 TXN_UNLOCK(); 2988 TXN_UNLOCK();
2997 refrigerator(); 2989 refrigerator();
2998 } else { 2990 } else {
2999 DECLARE_WAITQUEUE(wq, current);
3000
3001 add_wait_queue(&jfs_sync_thread_wait, &wq);
3002 set_current_state(TASK_INTERRUPTIBLE); 2991 set_current_state(TASK_INTERRUPTIBLE);
3003 TXN_UNLOCK(); 2992 TXN_UNLOCK();
3004 schedule(); 2993 schedule();
3005 current->state = TASK_RUNNING; 2994 current->state = TASK_RUNNING;
3006 remove_wait_queue(&jfs_sync_thread_wait, &wq);
3007 } 2995 }
3008 } while (!jfs_stop_threads); 2996 } while (!kthread_should_stop());
3009 2997
3010 jfs_info("jfs_sync being killed"); 2998 jfs_info("jfs_sync being killed");
3011 complete_and_exit(&jfsIOwait, 0); 2999 return 0;
3012} 3000}
3013 3001
3014#if defined(CONFIG_PROC_FS) && defined(CONFIG_JFS_DEBUG) 3002#if defined(CONFIG_PROC_FS) && defined(CONFIG_JFS_DEBUG)
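
The jfs_txnmgr.c hunks above convert the JFS commit and sync daemons from the old daemonize()/complete_and_exit() style to the kthread API: each thread now loops until kthread_should_stop() and simply returns, and wakeups go through wake_up_process() on the stored task_struct. (The odd-looking "! !mutex_trylock()" is the mechanical replacement of "down_trylock()", which returns nonzero on failure, by "!mutex_trylock()", which returns nonzero on success; the double negation keeps the original sense.) A minimal sketch of the resulting worker pattern, using a hypothetical example_thread() rather than the JFS code itself:

#include <linux/kthread.h>
#include <linux/sched.h>

static int example_thread(void *arg)
{
	do {
		/* ... service pending work ... */

		/* Sleep until woken by wake_up_process() or kthread_stop(). */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		__set_current_state(TASK_RUNNING);
	} while (!kthread_should_stop());

	return 0;	/* this value is handed back to kthread_stop() */
}

/* Creation and teardown (error handling elided):
 *
 *	struct task_struct *tsk = kthread_run(example_thread, NULL, "example");
 *	...
 *	kthread_stop(tsk);	blocks until example_thread() returns
 */
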
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 4abbe8604302..309cee575f7d 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -104,8 +104,8 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, int mode,
104 104
105 tid = txBegin(dip->i_sb, 0); 105 tid = txBegin(dip->i_sb, 0);
106 106
107 down(&JFS_IP(dip)->commit_sem); 107 mutex_lock(&JFS_IP(dip)->commit_mutex);
108 down(&JFS_IP(ip)->commit_sem); 108 mutex_lock(&JFS_IP(ip)->commit_mutex);
109 109
110 rc = jfs_init_acl(tid, ip, dip); 110 rc = jfs_init_acl(tid, ip, dip);
111 if (rc) 111 if (rc)
@@ -165,8 +165,8 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, int mode,
165 165
166 out3: 166 out3:
167 txEnd(tid); 167 txEnd(tid);
168 up(&JFS_IP(dip)->commit_sem); 168 mutex_unlock(&JFS_IP(dip)->commit_mutex);
169 up(&JFS_IP(ip)->commit_sem); 169 mutex_unlock(&JFS_IP(ip)->commit_mutex);
170 if (rc) { 170 if (rc) {
171 free_ea_wmap(ip); 171 free_ea_wmap(ip);
172 ip->i_nlink = 0; 172 ip->i_nlink = 0;
@@ -238,8 +238,8 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, int mode)
238 238
239 tid = txBegin(dip->i_sb, 0); 239 tid = txBegin(dip->i_sb, 0);
240 240
241 down(&JFS_IP(dip)->commit_sem); 241 mutex_lock(&JFS_IP(dip)->commit_mutex);
242 down(&JFS_IP(ip)->commit_sem); 242 mutex_lock(&JFS_IP(ip)->commit_mutex);
243 243
244 rc = jfs_init_acl(tid, ip, dip); 244 rc = jfs_init_acl(tid, ip, dip);
245 if (rc) 245 if (rc)
@@ -300,8 +300,8 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, int mode)
300 300
301 out3: 301 out3:
302 txEnd(tid); 302 txEnd(tid);
303 up(&JFS_IP(dip)->commit_sem); 303 mutex_unlock(&JFS_IP(dip)->commit_mutex);
304 up(&JFS_IP(ip)->commit_sem); 304 mutex_unlock(&JFS_IP(ip)->commit_mutex);
305 if (rc) { 305 if (rc) {
306 free_ea_wmap(ip); 306 free_ea_wmap(ip);
307 ip->i_nlink = 0; 307 ip->i_nlink = 0;
@@ -365,8 +365,8 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry)
365 365
366 tid = txBegin(dip->i_sb, 0); 366 tid = txBegin(dip->i_sb, 0);
367 367
368 down(&JFS_IP(dip)->commit_sem); 368 mutex_lock(&JFS_IP(dip)->commit_mutex);
369 down(&JFS_IP(ip)->commit_sem); 369 mutex_lock(&JFS_IP(ip)->commit_mutex);
370 370
371 iplist[0] = dip; 371 iplist[0] = dip;
372 iplist[1] = ip; 372 iplist[1] = ip;
@@ -384,8 +384,8 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry)
384 if (rc == -EIO) 384 if (rc == -EIO)
385 txAbort(tid, 1); 385 txAbort(tid, 1);
386 txEnd(tid); 386 txEnd(tid);
387 up(&JFS_IP(dip)->commit_sem); 387 mutex_unlock(&JFS_IP(dip)->commit_mutex);
388 up(&JFS_IP(ip)->commit_sem); 388 mutex_unlock(&JFS_IP(ip)->commit_mutex);
389 389
390 goto out2; 390 goto out2;
391 } 391 }
@@ -422,8 +422,8 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry)
422 422
423 txEnd(tid); 423 txEnd(tid);
424 424
425 up(&JFS_IP(dip)->commit_sem); 425 mutex_unlock(&JFS_IP(dip)->commit_mutex);
426 up(&JFS_IP(ip)->commit_sem); 426 mutex_unlock(&JFS_IP(ip)->commit_mutex);
427 427
428 /* 428 /*
429 * Truncating the directory index table is not guaranteed. It 429 * Truncating the directory index table is not guaranteed. It
@@ -488,8 +488,8 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry)
488 488
489 tid = txBegin(dip->i_sb, 0); 489 tid = txBegin(dip->i_sb, 0);
490 490
491 down(&JFS_IP(dip)->commit_sem); 491 mutex_lock(&JFS_IP(dip)->commit_mutex);
492 down(&JFS_IP(ip)->commit_sem); 492 mutex_lock(&JFS_IP(ip)->commit_mutex);
493 493
494 iplist[0] = dip; 494 iplist[0] = dip;
495 iplist[1] = ip; 495 iplist[1] = ip;
@@ -503,8 +503,8 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry)
503 if (rc == -EIO) 503 if (rc == -EIO)
504 txAbort(tid, 1); /* Marks FS Dirty */ 504 txAbort(tid, 1); /* Marks FS Dirty */
505 txEnd(tid); 505 txEnd(tid);
506 up(&JFS_IP(dip)->commit_sem); 506 mutex_unlock(&JFS_IP(dip)->commit_mutex);
507 up(&JFS_IP(ip)->commit_sem); 507 mutex_unlock(&JFS_IP(ip)->commit_mutex);
508 IWRITE_UNLOCK(ip); 508 IWRITE_UNLOCK(ip);
509 goto out1; 509 goto out1;
510 } 510 }
@@ -527,8 +527,8 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry)
527 if ((new_size = commitZeroLink(tid, ip)) < 0) { 527 if ((new_size = commitZeroLink(tid, ip)) < 0) {
528 txAbort(tid, 1); /* Marks FS Dirty */ 528 txAbort(tid, 1); /* Marks FS Dirty */
529 txEnd(tid); 529 txEnd(tid);
530 up(&JFS_IP(dip)->commit_sem); 530 mutex_unlock(&JFS_IP(dip)->commit_mutex);
531 up(&JFS_IP(ip)->commit_sem); 531 mutex_unlock(&JFS_IP(ip)->commit_mutex);
532 IWRITE_UNLOCK(ip); 532 IWRITE_UNLOCK(ip);
533 rc = new_size; 533 rc = new_size;
534 goto out1; 534 goto out1;
@@ -556,13 +556,13 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry)
556 556
557 txEnd(tid); 557 txEnd(tid);
558 558
559 up(&JFS_IP(dip)->commit_sem); 559 mutex_unlock(&JFS_IP(dip)->commit_mutex);
560 up(&JFS_IP(ip)->commit_sem); 560 mutex_unlock(&JFS_IP(ip)->commit_mutex);
561 561
562 562
563 while (new_size && (rc == 0)) { 563 while (new_size && (rc == 0)) {
564 tid = txBegin(dip->i_sb, 0); 564 tid = txBegin(dip->i_sb, 0);
565 down(&JFS_IP(ip)->commit_sem); 565 mutex_lock(&JFS_IP(ip)->commit_mutex);
566 new_size = xtTruncate_pmap(tid, ip, new_size); 566 new_size = xtTruncate_pmap(tid, ip, new_size);
567 if (new_size < 0) { 567 if (new_size < 0) {
568 txAbort(tid, 1); /* Marks FS Dirty */ 568 txAbort(tid, 1); /* Marks FS Dirty */
@@ -570,7 +570,7 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry)
570 } else 570 } else
571 rc = txCommit(tid, 2, &iplist[0], COMMIT_SYNC); 571 rc = txCommit(tid, 2, &iplist[0], COMMIT_SYNC);
572 txEnd(tid); 572 txEnd(tid);
573 up(&JFS_IP(ip)->commit_sem); 573 mutex_unlock(&JFS_IP(ip)->commit_mutex);
574 } 574 }
575 575
576 if (ip->i_nlink == 0) 576 if (ip->i_nlink == 0)
@@ -805,8 +805,8 @@ static int jfs_link(struct dentry *old_dentry,
805 805
806 tid = txBegin(ip->i_sb, 0); 806 tid = txBegin(ip->i_sb, 0);
807 807
808 down(&JFS_IP(dir)->commit_sem); 808 mutex_lock(&JFS_IP(dir)->commit_mutex);
809 down(&JFS_IP(ip)->commit_sem); 809 mutex_lock(&JFS_IP(ip)->commit_mutex);
810 810
811 /* 811 /*
812 * scan parent directory for entry/freespace 812 * scan parent directory for entry/freespace
@@ -847,8 +847,8 @@ static int jfs_link(struct dentry *old_dentry,
847 out: 847 out:
848 txEnd(tid); 848 txEnd(tid);
849 849
850 up(&JFS_IP(dir)->commit_sem); 850 mutex_unlock(&JFS_IP(dir)->commit_mutex);
851 up(&JFS_IP(ip)->commit_sem); 851 mutex_unlock(&JFS_IP(ip)->commit_mutex);
852 852
853 jfs_info("jfs_link: rc:%d", rc); 853 jfs_info("jfs_link: rc:%d", rc);
854 return rc; 854 return rc;
@@ -916,8 +916,8 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
916 916
917 tid = txBegin(dip->i_sb, 0); 917 tid = txBegin(dip->i_sb, 0);
918 918
919 down(&JFS_IP(dip)->commit_sem); 919 mutex_lock(&JFS_IP(dip)->commit_mutex);
920 down(&JFS_IP(ip)->commit_sem); 920 mutex_lock(&JFS_IP(ip)->commit_mutex);
921 921
922 rc = jfs_init_security(tid, ip, dip); 922 rc = jfs_init_security(tid, ip, dip);
923 if (rc) 923 if (rc)
@@ -1037,8 +1037,8 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
1037 1037
1038 out3: 1038 out3:
1039 txEnd(tid); 1039 txEnd(tid);
1040 up(&JFS_IP(dip)->commit_sem); 1040 mutex_unlock(&JFS_IP(dip)->commit_mutex);
1041 up(&JFS_IP(ip)->commit_sem); 1041 mutex_unlock(&JFS_IP(ip)->commit_mutex);
1042 if (rc) { 1042 if (rc) {
1043 free_ea_wmap(ip); 1043 free_ea_wmap(ip);
1044 ip->i_nlink = 0; 1044 ip->i_nlink = 0;
@@ -1141,13 +1141,13 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1141 */ 1141 */
1142 tid = txBegin(new_dir->i_sb, 0); 1142 tid = txBegin(new_dir->i_sb, 0);
1143 1143
1144 down(&JFS_IP(new_dir)->commit_sem); 1144 mutex_lock(&JFS_IP(new_dir)->commit_mutex);
1145 down(&JFS_IP(old_ip)->commit_sem); 1145 mutex_lock(&JFS_IP(old_ip)->commit_mutex);
1146 if (old_dir != new_dir) 1146 if (old_dir != new_dir)
1147 down(&JFS_IP(old_dir)->commit_sem); 1147 mutex_lock(&JFS_IP(old_dir)->commit_mutex);
1148 1148
1149 if (new_ip) { 1149 if (new_ip) {
1150 down(&JFS_IP(new_ip)->commit_sem); 1150 mutex_lock(&JFS_IP(new_ip)->commit_mutex);
1151 /* 1151 /*
1152 * Change existing directory entry to new inode number 1152 * Change existing directory entry to new inode number
1153 */ 1153 */
@@ -1160,10 +1160,10 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1160 if (S_ISDIR(new_ip->i_mode)) { 1160 if (S_ISDIR(new_ip->i_mode)) {
1161 new_ip->i_nlink--; 1161 new_ip->i_nlink--;
1162 if (new_ip->i_nlink) { 1162 if (new_ip->i_nlink) {
1163 up(&JFS_IP(new_dir)->commit_sem); 1163 mutex_unlock(&JFS_IP(new_dir)->commit_mutex);
1164 up(&JFS_IP(old_ip)->commit_sem); 1164 mutex_unlock(&JFS_IP(old_ip)->commit_mutex);
1165 if (old_dir != new_dir) 1165 if (old_dir != new_dir)
1166 up(&JFS_IP(old_dir)->commit_sem); 1166 mutex_unlock(&JFS_IP(old_dir)->commit_mutex);
1167 if (!S_ISDIR(old_ip->i_mode) && new_ip) 1167 if (!S_ISDIR(old_ip->i_mode) && new_ip)
1168 IWRITE_UNLOCK(new_ip); 1168 IWRITE_UNLOCK(new_ip);
1169 jfs_error(new_ip->i_sb, 1169 jfs_error(new_ip->i_sb,
@@ -1282,16 +1282,16 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1282 out4: 1282 out4:
1283 txEnd(tid); 1283 txEnd(tid);
1284 1284
1285 up(&JFS_IP(new_dir)->commit_sem); 1285 mutex_unlock(&JFS_IP(new_dir)->commit_mutex);
1286 up(&JFS_IP(old_ip)->commit_sem); 1286 mutex_unlock(&JFS_IP(old_ip)->commit_mutex);
1287 if (old_dir != new_dir) 1287 if (old_dir != new_dir)
1288 up(&JFS_IP(old_dir)->commit_sem); 1288 mutex_unlock(&JFS_IP(old_dir)->commit_mutex);
1289 if (new_ip) 1289 if (new_ip)
1290 up(&JFS_IP(new_ip)->commit_sem); 1290 mutex_unlock(&JFS_IP(new_ip)->commit_mutex);
1291 1291
1292 while (new_size && (rc == 0)) { 1292 while (new_size && (rc == 0)) {
1293 tid = txBegin(new_ip->i_sb, 0); 1293 tid = txBegin(new_ip->i_sb, 0);
1294 down(&JFS_IP(new_ip)->commit_sem); 1294 mutex_lock(&JFS_IP(new_ip)->commit_mutex);
1295 new_size = xtTruncate_pmap(tid, new_ip, new_size); 1295 new_size = xtTruncate_pmap(tid, new_ip, new_size);
1296 if (new_size < 0) { 1296 if (new_size < 0) {
1297 txAbort(tid, 1); 1297 txAbort(tid, 1);
@@ -1299,7 +1299,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1299 } else 1299 } else
1300 rc = txCommit(tid, 1, &new_ip, COMMIT_SYNC); 1300 rc = txCommit(tid, 1, &new_ip, COMMIT_SYNC);
1301 txEnd(tid); 1301 txEnd(tid);
1302 up(&JFS_IP(new_ip)->commit_sem); 1302 mutex_unlock(&JFS_IP(new_ip)->commit_mutex);
1303 } 1303 }
1304 if (new_ip && (new_ip->i_nlink == 0)) 1304 if (new_ip && (new_ip->i_nlink == 0))
1305 set_cflag(COMMIT_Nolink, new_ip); 1305 set_cflag(COMMIT_Nolink, new_ip);
@@ -1361,8 +1361,8 @@ static int jfs_mknod(struct inode *dir, struct dentry *dentry,
1361 1361
1362 tid = txBegin(dir->i_sb, 0); 1362 tid = txBegin(dir->i_sb, 0);
1363 1363
1364 down(&JFS_IP(dir)->commit_sem); 1364 mutex_lock(&JFS_IP(dir)->commit_mutex);
1365 down(&JFS_IP(ip)->commit_sem); 1365 mutex_lock(&JFS_IP(ip)->commit_mutex);
1366 1366
1367 rc = jfs_init_acl(tid, ip, dir); 1367 rc = jfs_init_acl(tid, ip, dir);
1368 if (rc) 1368 if (rc)
@@ -1407,8 +1407,8 @@ static int jfs_mknod(struct inode *dir, struct dentry *dentry,
1407 1407
1408 out3: 1408 out3:
1409 txEnd(tid); 1409 txEnd(tid);
1410 up(&JFS_IP(ip)->commit_sem); 1410 mutex_unlock(&JFS_IP(ip)->commit_mutex);
1411 up(&JFS_IP(dir)->commit_sem); 1411 mutex_unlock(&JFS_IP(dir)->commit_mutex);
1412 if (rc) { 1412 if (rc) {
1413 free_ea_wmap(ip); 1413 free_ea_wmap(ip);
1414 ip->i_nlink = 0; 1414 ip->i_nlink = 0;
@@ -1523,6 +1523,7 @@ struct file_operations jfs_dir_operations = {
1523 .read = generic_read_dir, 1523 .read = generic_read_dir,
1524 .readdir = jfs_readdir, 1524 .readdir = jfs_readdir,
1525 .fsync = jfs_fsync, 1525 .fsync = jfs_fsync,
1526 .ioctl = jfs_ioctl,
1526}; 1527};
1527 1528
1528static int jfs_ci_hash(struct dentry *dir, struct qstr *this) 1529static int jfs_ci_hash(struct dentry *dir, struct qstr *this)
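
Every namei.c operation above brackets one transaction with the same pair of commit locks, taken parent directory first and released only after txEnd(); keeping that order consistent across jfs_create(), jfs_mkdir(), jfs_unlink() and friends is what keeps the nested mutex_lock() calls deadlock-free. A condensed sketch of the pattern (dip, ip and the error paths mirror jfs_create() above):

tid = txBegin(dip->i_sb, 0);

mutex_lock(&JFS_IP(dip)->commit_mutex);	/* parent directory first */
mutex_lock(&JFS_IP(ip)->commit_mutex);	/* then the affected inode */

/* ... build the transaction ... */
rc = txCommit(tid, 1, &ip, 0);

txEnd(tid);
mutex_unlock(&JFS_IP(dip)->commit_mutex);
mutex_unlock(&JFS_IP(ip)->commit_mutex);
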
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 8d31f1336431..18f69e6aa719 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -25,6 +25,7 @@
25#include <linux/vfs.h> 25#include <linux/vfs.h>
26#include <linux/mount.h> 26#include <linux/mount.h>
27#include <linux/moduleparam.h> 27#include <linux/moduleparam.h>
28#include <linux/kthread.h>
28#include <linux/posix_acl.h> 29#include <linux/posix_acl.h>
29#include <asm/uaccess.h> 30#include <asm/uaccess.h>
30#include <linux/seq_file.h> 31#include <linux/seq_file.h>
@@ -54,11 +55,9 @@ static int commit_threads = 0;
54module_param(commit_threads, int, 0); 55module_param(commit_threads, int, 0);
55MODULE_PARM_DESC(commit_threads, "Number of commit threads"); 56MODULE_PARM_DESC(commit_threads, "Number of commit threads");
56 57
57int jfs_stop_threads; 58static struct task_struct *jfsCommitThread[MAX_COMMIT_THREADS];
58static pid_t jfsIOthread; 59struct task_struct *jfsIOthread;
59static pid_t jfsCommitThread[MAX_COMMIT_THREADS]; 60struct task_struct *jfsSyncThread;
60static pid_t jfsSyncThread;
61DECLARE_COMPLETION(jfsIOwait);
62 61
63#ifdef CONFIG_JFS_DEBUG 62#ifdef CONFIG_JFS_DEBUG
64int jfsloglevel = JFS_LOGLEVEL_WARN; 63int jfsloglevel = JFS_LOGLEVEL_WARN;
@@ -195,7 +194,7 @@ static void jfs_put_super(struct super_block *sb)
195enum { 194enum {
196 Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize, 195 Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize,
197 Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err, Opt_quota, 196 Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err, Opt_quota,
198 Opt_usrquota, Opt_grpquota 197 Opt_usrquota, Opt_grpquota, Opt_uid, Opt_gid, Opt_umask
199}; 198};
200 199
201static match_table_t tokens = { 200static match_table_t tokens = {
@@ -209,6 +208,9 @@ static match_table_t tokens = {
209 {Opt_ignore, "quota"}, 208 {Opt_ignore, "quota"},
210 {Opt_usrquota, "usrquota"}, 209 {Opt_usrquota, "usrquota"},
211 {Opt_grpquota, "grpquota"}, 210 {Opt_grpquota, "grpquota"},
211 {Opt_uid, "uid=%u"},
212 {Opt_gid, "gid=%u"},
213 {Opt_umask, "umask=%u"},
212 {Opt_err, NULL} 214 {Opt_err, NULL}
213}; 215};
214 216
@@ -313,7 +315,29 @@ static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
313 "JFS: quota operations not supported\n"); 315 "JFS: quota operations not supported\n");
314 break; 316 break;
315#endif 317#endif
316 318 case Opt_uid:
319 {
320 char *uid = args[0].from;
321 sbi->uid = simple_strtoul(uid, &uid, 0);
322 break;
323 }
324 case Opt_gid:
325 {
326 char *gid = args[0].from;
327 sbi->gid = simple_strtoul(gid, &gid, 0);
328 break;
329 }
330 case Opt_umask:
331 {
332 char *umask = args[0].from;
333 sbi->umask = simple_strtoul(umask, &umask, 8);
334 if (sbi->umask & ~0777) {
335 printk(KERN_ERR
336 "JFS: Invalid value of umask\n");
337 goto cleanup;
338 }
339 break;
340 }
317 default: 341 default:
318 printk("jfs: Unrecognized mount option \"%s\" " 342 printk("jfs: Unrecognized mount option \"%s\" "
319 " or missing value\n", p); 343 " or missing value\n", p);
@@ -396,12 +420,12 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
396 if (!new_valid_dev(sb->s_bdev->bd_dev)) 420 if (!new_valid_dev(sb->s_bdev->bd_dev))
397 return -EOVERFLOW; 421 return -EOVERFLOW;
398 422
399 sbi = kmalloc(sizeof (struct jfs_sb_info), GFP_KERNEL); 423 sbi = kzalloc(sizeof (struct jfs_sb_info), GFP_KERNEL);
400 if (!sbi) 424 if (!sbi)
401 return -ENOSPC; 425 return -ENOSPC;
402 memset(sbi, 0, sizeof (struct jfs_sb_info));
403 sb->s_fs_info = sbi; 426 sb->s_fs_info = sbi;
404 sbi->sb = sb; 427 sbi->sb = sb;
428 sbi->uid = sbi->gid = sbi->umask = -1;
405 429
406 /* initialize the mount flag and determine the default error handler */ 430 /* initialize the mount flag and determine the default error handler */
407 flag = JFS_ERR_REMOUNT_RO; 431 flag = JFS_ERR_REMOUNT_RO;
@@ -564,10 +588,14 @@ static int jfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
564{ 588{
565 struct jfs_sb_info *sbi = JFS_SBI(vfs->mnt_sb); 589 struct jfs_sb_info *sbi = JFS_SBI(vfs->mnt_sb);
566 590
591 if (sbi->uid != -1)
592 seq_printf(seq, ",uid=%d", sbi->uid);
593 if (sbi->gid != -1)
594 seq_printf(seq, ",gid=%d", sbi->gid);
595 if (sbi->umask != -1)
596 seq_printf(seq, ",umask=%03o", sbi->umask);
567 if (sbi->flag & JFS_NOINTEGRITY) 597 if (sbi->flag & JFS_NOINTEGRITY)
568 seq_puts(seq, ",nointegrity"); 598 seq_puts(seq, ",nointegrity");
569 else
570 seq_puts(seq, ",integrity");
571 599
572#if defined(CONFIG_QUOTA) 600#if defined(CONFIG_QUOTA)
573 if (sbi->flag & JFS_USRQUOTA) 601 if (sbi->flag & JFS_USRQUOTA)
@@ -617,7 +645,7 @@ static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags)
617 memset(jfs_ip, 0, sizeof(struct jfs_inode_info)); 645 memset(jfs_ip, 0, sizeof(struct jfs_inode_info));
618 INIT_LIST_HEAD(&jfs_ip->anon_inode_list); 646 INIT_LIST_HEAD(&jfs_ip->anon_inode_list);
619 init_rwsem(&jfs_ip->rdwrlock); 647 init_rwsem(&jfs_ip->rdwrlock);
620 init_MUTEX(&jfs_ip->commit_sem); 648 mutex_init(&jfs_ip->commit_mutex);
621 init_rwsem(&jfs_ip->xattr_sem); 649 init_rwsem(&jfs_ip->xattr_sem);
622 spin_lock_init(&jfs_ip->ag_lock); 650 spin_lock_init(&jfs_ip->ag_lock);
623 jfs_ip->active_ag = -1; 651 jfs_ip->active_ag = -1;
@@ -661,12 +689,12 @@ static int __init init_jfs_fs(void)
661 /* 689 /*
662 * I/O completion thread (endio) 690 * I/O completion thread (endio)
663 */ 691 */
664 jfsIOthread = kernel_thread(jfsIOWait, NULL, CLONE_KERNEL); 692 jfsIOthread = kthread_run(jfsIOWait, NULL, "jfsIO");
665 if (jfsIOthread < 0) { 693 if (IS_ERR(jfsIOthread)) {
666 jfs_err("init_jfs_fs: fork failed w/rc = %d", jfsIOthread); 694 rc = PTR_ERR(jfsIOthread);
695 jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
667 goto end_txmngr; 696 goto end_txmngr;
668 } 697 }
669 wait_for_completion(&jfsIOwait); /* Wait until thread starts */
670 698
671 if (commit_threads < 1) 699 if (commit_threads < 1)
672 commit_threads = num_online_cpus(); 700 commit_threads = num_online_cpus();
@@ -674,24 +702,21 @@ static int __init init_jfs_fs(void)
674 commit_threads = MAX_COMMIT_THREADS; 702 commit_threads = MAX_COMMIT_THREADS;
675 703
676 for (i = 0; i < commit_threads; i++) { 704 for (i = 0; i < commit_threads; i++) {
677 jfsCommitThread[i] = kernel_thread(jfs_lazycommit, NULL, 705 jfsCommitThread[i] = kthread_run(jfs_lazycommit, NULL, "jfsCommit");
678 CLONE_KERNEL); 706 if (IS_ERR(jfsCommitThread[i])) {
679 if (jfsCommitThread[i] < 0) { 707 rc = PTR_ERR(jfsCommitThread[i]);
680 jfs_err("init_jfs_fs: fork failed w/rc = %d", 708 jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
681 jfsCommitThread[i]);
682 commit_threads = i; 709 commit_threads = i;
683 goto kill_committask; 710 goto kill_committask;
684 } 711 }
685 /* Wait until thread starts */
686 wait_for_completion(&jfsIOwait);
687 } 712 }
688 713
689 jfsSyncThread = kernel_thread(jfs_sync, NULL, CLONE_KERNEL); 714 jfsSyncThread = kthread_run(jfs_sync, NULL, "jfsSync");
690 if (jfsSyncThread < 0) { 715 if (IS_ERR(jfsSyncThread)) {
691 jfs_err("init_jfs_fs: fork failed w/rc = %d", jfsSyncThread); 716 rc = PTR_ERR(jfsSyncThread);
717 jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
692 goto kill_committask; 718 goto kill_committask;
693 } 719 }
694 wait_for_completion(&jfsIOwait); /* Wait until thread starts */
695 720
696#ifdef PROC_FS_JFS 721#ifdef PROC_FS_JFS
697 jfs_proc_init(); 722 jfs_proc_init();
@@ -700,13 +725,9 @@ static int __init init_jfs_fs(void)
700 return register_filesystem(&jfs_fs_type); 725 return register_filesystem(&jfs_fs_type);
701 726
702kill_committask: 727kill_committask:
703 jfs_stop_threads = 1;
704 wake_up_all(&jfs_commit_thread_wait);
705 for (i = 0; i < commit_threads; i++) 728 for (i = 0; i < commit_threads; i++)
706 wait_for_completion(&jfsIOwait); 729 kthread_stop(jfsCommitThread[i]);
707 730 kthread_stop(jfsIOthread);
708 wake_up(&jfs_IO_thread_wait);
709 wait_for_completion(&jfsIOwait); /* Wait for thread exit */
710end_txmngr: 731end_txmngr:
711 txExit(); 732 txExit();
712free_metapage: 733free_metapage:
@@ -722,16 +743,13 @@ static void __exit exit_jfs_fs(void)
722 743
723 jfs_info("exit_jfs_fs called"); 744 jfs_info("exit_jfs_fs called");
724 745
725 jfs_stop_threads = 1;
726 txExit(); 746 txExit();
727 metapage_exit(); 747 metapage_exit();
728 wake_up(&jfs_IO_thread_wait); 748
729 wait_for_completion(&jfsIOwait); /* Wait until IO thread exits */ 749 kthread_stop(jfsIOthread);
730 wake_up_all(&jfs_commit_thread_wait);
731 for (i = 0; i < commit_threads; i++) 750 for (i = 0; i < commit_threads; i++)
732 wait_for_completion(&jfsIOwait); 751 kthread_stop(jfsCommitThread[i]);
733 wake_up(&jfs_sync_thread_wait); 752 kthread_stop(jfsSyncThread);
734 wait_for_completion(&jfsIOwait); /* Wait until Sync thread exits */
735#ifdef PROC_FS_JFS 753#ifdef PROC_FS_JFS
736 jfs_proc_clean(); 754 jfs_proc_clean();
737#endif 755#endif
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index f23048f9471f..9bc5b7c055ce 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -934,13 +934,13 @@ int jfs_setxattr(struct dentry *dentry, const char *name, const void *value,
934 } 934 }
935 935
936 tid = txBegin(inode->i_sb, 0); 936 tid = txBegin(inode->i_sb, 0);
937 down(&ji->commit_sem); 937 mutex_lock(&ji->commit_mutex);
938 rc = __jfs_setxattr(tid, dentry->d_inode, name, value, value_len, 938 rc = __jfs_setxattr(tid, dentry->d_inode, name, value, value_len,
939 flags); 939 flags);
940 if (!rc) 940 if (!rc)
941 rc = txCommit(tid, 1, &inode, 0); 941 rc = txCommit(tid, 1, &inode, 0);
942 txEnd(tid); 942 txEnd(tid);
943 up(&ji->commit_sem); 943 mutex_unlock(&ji->commit_mutex);
944 944
945 return rc; 945 return rc;
946} 946}
@@ -1093,12 +1093,12 @@ int jfs_removexattr(struct dentry *dentry, const char *name)
1093 return rc; 1093 return rc;
1094 1094
1095 tid = txBegin(inode->i_sb, 0); 1095 tid = txBegin(inode->i_sb, 0);
1096 down(&ji->commit_sem); 1096 mutex_lock(&ji->commit_mutex);
1097 rc = __jfs_setxattr(tid, dentry->d_inode, name, NULL, 0, XATTR_REPLACE); 1097 rc = __jfs_setxattr(tid, dentry->d_inode, name, NULL, 0, XATTR_REPLACE);
1098 if (!rc) 1098 if (!rc)
1099 rc = txCommit(tid, 1, &inode, 0); 1099 rc = txCommit(tid, 1, &inode, 0);
1100 txEnd(tid); 1100 txEnd(tid);
1101 up(&ji->commit_sem); 1101 mutex_unlock(&ji->commit_mutex);
1102 1102
1103 return rc; 1103 return rc;
1104} 1104}
diff --git a/include/asm-arm/irq.h b/include/asm-arm/irq.h
index 7772432d3fd7..60b5105c9c93 100644
--- a/include/asm-arm/irq.h
+++ b/include/asm-arm/irq.h
@@ -27,7 +27,7 @@ extern void enable_irq(unsigned int);
27 27
28/* 28/*
29 * These correspond with the SA_TRIGGER_* defines, and therefore the 29 * These correspond with the SA_TRIGGER_* defines, and therefore the
30 * IRQRESOURCE_IRQ_* defines. 30 * IORESOURCE_IRQ_* defines.
31 */ 31 */
32#define __IRQT_RISEDGE (1 << 0) 32#define __IRQT_RISEDGE (1 << 0)
33#define __IRQT_FALEDGE (1 << 1) 33#define __IRQT_FALEDGE (1 << 1)
diff --git a/include/asm-sparc/idprom.h b/include/asm-sparc/idprom.h
index d856e640acd3..59083ed85232 100644
--- a/include/asm-sparc/idprom.h
+++ b/include/asm-sparc/idprom.h
@@ -7,27 +7,19 @@
7#ifndef _SPARC_IDPROM_H 7#ifndef _SPARC_IDPROM_H
8#define _SPARC_IDPROM_H 8#define _SPARC_IDPROM_H
9 9
10/* Offset into the EEPROM where the id PROM is located on the 4c */ 10#include <linux/types.h>
11#define IDPROM_OFFSET 0x7d8
12 11
13/* On sun4m; physical. */ 12struct idprom {
14/* MicroSPARC(-II) does not decode 31rd bit, but it works. */ 13 u8 id_format; /* Format identifier (always 0x01) */
15#define IDPROM_OFFSET_M 0xfd8 14 u8 id_machtype; /* Machine type */
16 15 u8 id_ethaddr[6]; /* Hardware ethernet address */
17struct idprom 16 s32 id_date; /* Date of manufacture */
18{ 17 u32 id_sernum:24; /* Unique serial number */
19 unsigned char id_format; /* Format identifier (always 0x01) */ 18 u8 id_cksum; /* Checksum - xor of the data bytes */
20 unsigned char id_machtype; /* Machine type */ 19 u8 reserved[16];
21 unsigned char id_ethaddr[6]; /* Hardware ethernet address */
22 long id_date; /* Date of manufacture */
23 unsigned int id_sernum:24; /* Unique serial number */
24 unsigned char id_cksum; /* Checksum - xor of the data bytes */
25 unsigned char reserved[16];
26}; 20};
27 21
28extern struct idprom *idprom; 22extern struct idprom *idprom;
29extern void idprom_init(void); 23extern void idprom_init(void);
30 24
31#define IDPROM_SIZE (sizeof(struct idprom))
32
33#endif /* !(_SPARC_IDPROM_H) */ 25#endif /* !(_SPARC_IDPROM_H) */
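
The reworked struct idprom documents id_cksum as the xor of the data bytes that precede it. A hypothetical validation helper built only from that description (the function name and its placement are assumptions, not existing kernel code):

#include <linux/stddef.h>
#include <linux/types.h>

static int idprom_checksum_ok(const struct idprom *id)
{
	const u8 *p = (const u8 *)id;
	u8 sum = 0;
	size_t i;

	/* xor everything up to, but not including, the checksum byte */
	for (i = 0; i < offsetof(struct idprom, id_cksum); i++)
		sum ^= p[i];

	return sum == id->id_cksum;
}
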
diff --git a/include/asm-sparc/oplib.h b/include/asm-sparc/oplib.h
index d0d76b30eb4c..f283f8aaf6a9 100644
--- a/include/asm-sparc/oplib.h
+++ b/include/asm-sparc/oplib.h
@@ -165,6 +165,7 @@ enum prom_input_device {
165 PROMDEV_ITTYA, /* input from ttya */ 165 PROMDEV_ITTYA, /* input from ttya */
166 PROMDEV_ITTYB, /* input from ttyb */ 166 PROMDEV_ITTYB, /* input from ttyb */
167 PROMDEV_IRSC, /* input from rsc */ 167 PROMDEV_IRSC, /* input from rsc */
168 PROMDEV_IVCONS, /* input from virtual-console */
168 PROMDEV_I_UNK, 169 PROMDEV_I_UNK,
169}; 170};
170 171
@@ -177,6 +178,7 @@ enum prom_output_device {
177 PROMDEV_OTTYA, /* to ttya */ 178 PROMDEV_OTTYA, /* to ttya */
178 PROMDEV_OTTYB, /* to ttyb */ 179 PROMDEV_OTTYB, /* to ttyb */
179 PROMDEV_ORSC, /* to rsc */ 180 PROMDEV_ORSC, /* to rsc */
181 PROMDEV_OVCONS, /* to virtual-console */
180 PROMDEV_O_UNK, 182 PROMDEV_O_UNK,
181}; 183};
182 184
diff --git a/include/asm-sparc/uaccess.h b/include/asm-sparc/uaccess.h
index f8f1ec1f06e6..3cf132e1aa25 100644
--- a/include/asm-sparc/uaccess.h
+++ b/include/asm-sparc/uaccess.h
@@ -120,17 +120,6 @@ case 8: __put_user_asm(x,d,addr,__pu_ret); break; \
120default: __pu_ret = __put_user_bad(); break; \ 120default: __pu_ret = __put_user_bad(); break; \
121} } else { __pu_ret = -EFAULT; } __pu_ret; }) 121} } else { __pu_ret = -EFAULT; } __pu_ret; })
122 122
123#define __put_user_check_ret(x,addr,size,retval) ({ \
124register int __foo __asm__ ("l1"); \
125if (__access_ok(addr,size)) { \
126switch (size) { \
127case 1: __put_user_asm_ret(x,b,addr,retval,__foo); break; \
128case 2: __put_user_asm_ret(x,h,addr,retval,__foo); break; \
129case 4: __put_user_asm_ret(x,,addr,retval,__foo); break; \
130case 8: __put_user_asm_ret(x,d,addr,retval,__foo); break; \
131default: if (__put_user_bad()) return retval; break; \
132} } else return retval; })
133
134#define __put_user_nocheck(x,addr,size) ({ \ 123#define __put_user_nocheck(x,addr,size) ({ \
135register int __pu_ret; \ 124register int __pu_ret; \
136switch (size) { \ 125switch (size) { \
@@ -141,16 +130,6 @@ case 8: __put_user_asm(x,d,addr,__pu_ret); break; \
141default: __pu_ret = __put_user_bad(); break; \ 130default: __pu_ret = __put_user_bad(); break; \
142} __pu_ret; }) 131} __pu_ret; })
143 132
144#define __put_user_nocheck_ret(x,addr,size,retval) ({ \
145register int __foo __asm__ ("l1"); \
146switch (size) { \
147case 1: __put_user_asm_ret(x,b,addr,retval,__foo); break; \
148case 2: __put_user_asm_ret(x,h,addr,retval,__foo); break; \
149case 4: __put_user_asm_ret(x,,addr,retval,__foo); break; \
150case 8: __put_user_asm_ret(x,d,addr,retval,__foo); break; \
151default: if (__put_user_bad()) return retval; break; \
152} })
153
154#define __put_user_asm(x,size,addr,ret) \ 133#define __put_user_asm(x,size,addr,ret) \
155__asm__ __volatile__( \ 134__asm__ __volatile__( \
156 "/* Put user asm, inline. */\n" \ 135 "/* Put user asm, inline. */\n" \
@@ -170,32 +149,6 @@ __asm__ __volatile__( \
170 : "=&r" (ret) : "r" (x), "m" (*__m(addr)), \ 149 : "=&r" (ret) : "r" (x), "m" (*__m(addr)), \
171 "i" (-EFAULT)) 150 "i" (-EFAULT))
172 151
173#define __put_user_asm_ret(x,size,addr,ret,foo) \
174if (__builtin_constant_p(ret) && ret == -EFAULT) \
175__asm__ __volatile__( \
176 "/* Put user asm ret, inline. */\n" \
177"1:\t" "st"#size " %1, %2\n\n\t" \
178 ".section __ex_table,#alloc\n\t" \
179 ".align 4\n\t" \
180 ".word 1b, __ret_efault\n\n\t" \
181 ".previous\n\n\t" \
182 : "=r" (foo) : "r" (x), "m" (*__m(addr))); \
183else \
184__asm__ __volatile( \
185 "/* Put user asm ret, inline. */\n" \
186"1:\t" "st"#size " %1, %2\n\n\t" \
187 ".section .fixup,#alloc,#execinstr\n\t" \
188 ".align 4\n" \
189"3:\n\t" \
190 "ret\n\t" \
191 " restore %%g0, %3, %%o0\n\t" \
192 ".previous\n\n\t" \
193 ".section __ex_table,#alloc\n\t" \
194 ".align 4\n\t" \
195 ".word 1b, 3b\n\n\t" \
196 ".previous\n\n\t" \
197 : "=r" (foo) : "r" (x), "m" (*__m(addr)), "i" (ret))
198
199extern int __put_user_bad(void); 152extern int __put_user_bad(void);
200 153
201#define __get_user_check(x,addr,size,type) ({ \ 154#define __get_user_check(x,addr,size,type) ({ \
diff --git a/include/asm-sparc64/a.out.h b/include/asm-sparc64/a.out.h
index 02af289e3f46..35cb5c9e0c92 100644
--- a/include/asm-sparc64/a.out.h
+++ b/include/asm-sparc64/a.out.h
@@ -95,7 +95,11 @@ struct relocation_info /* used when header.a_machtype == M_SPARC */
95 95
96#ifdef __KERNEL__ 96#ifdef __KERNEL__
97 97
98#define STACK_TOP (test_thread_flag(TIF_32BIT) ? 0xf0000000 : 0x80000000000L) 98#define STACK_TOP32 ((1UL << 32UL) - PAGE_SIZE)
99#define STACK_TOP64 (0x0000080000000000UL - (1UL << 32UL))
100
101#define STACK_TOP (test_thread_flag(TIF_32BIT) ? \
102 STACK_TOP32 : STACK_TOP64)
99 103
100#endif 104#endif
101 105
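
For reference, assuming sparc64's default 8K PAGE_SIZE, the new stack-top constants evaluate to:

/* STACK_TOP32 = (1UL << 32) - 8192        = 0x00000000ffffe000
 * STACK_TOP64 = (1UL << 43) - (1UL << 32) = 0x000007ff00000000
 */
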
diff --git a/include/asm-sparc64/asi.h b/include/asm-sparc64/asi.h
index 534855660f2a..662a21107ae6 100644
--- a/include/asm-sparc64/asi.h
+++ b/include/asm-sparc64/asi.h
@@ -25,14 +25,27 @@
25 25
26/* SpitFire and later extended ASIs. The "(III)" marker designates 26/* SpitFire and later extended ASIs. The "(III)" marker designates
27 * UltraSparc-III and later specific ASIs. The "(CMT)" marker designates 27 * UltraSparc-III and later specific ASIs. The "(CMT)" marker designates
28 * Chip Multi Threading specific ASIs. 28 * Chip Multi Threading specific ASIs. "(NG)" designates Niagara specific
29 * ASIs, "(4V)" designates SUN4V specific ASIs.
29 */ 30 */
30#define ASI_PHYS_USE_EC 0x14 /* PADDR, E-cachable */ 31#define ASI_PHYS_USE_EC 0x14 /* PADDR, E-cachable */
31#define ASI_PHYS_BYPASS_EC_E 0x15 /* PADDR, E-bit */ 32#define ASI_PHYS_BYPASS_EC_E 0x15 /* PADDR, E-bit */
33#define ASI_BLK_AIUP_4V 0x16 /* (4V) Prim, user, block ld/st */
34#define ASI_BLK_AIUS_4V 0x17 /* (4V) Sec, user, block ld/st */
32#define ASI_PHYS_USE_EC_L 0x1c /* PADDR, E-cachable, little endian*/ 35#define ASI_PHYS_USE_EC_L 0x1c /* PADDR, E-cachable, little endian*/
33#define ASI_PHYS_BYPASS_EC_E_L 0x1d /* PADDR, E-bit, little endian */ 36#define ASI_PHYS_BYPASS_EC_E_L 0x1d /* PADDR, E-bit, little endian */
37#define ASI_BLK_AIUP_L_4V 0x1e /* (4V) Prim, user, block, l-endian*/
38#define ASI_BLK_AIUS_L_4V 0x1f /* (4V) Sec, user, block, l-endian */
39#define ASI_SCRATCHPAD 0x20 /* (4V) Scratch Pad Registers */
40#define ASI_MMU 0x21 /* (4V) MMU Context Registers */
41#define ASI_BLK_INIT_QUAD_LDD_AIUS 0x23 /* (NG) init-store, twin load,
42 * secondary, user
43 */
34#define ASI_NUCLEUS_QUAD_LDD 0x24 /* Cachable, qword load */ 44#define ASI_NUCLEUS_QUAD_LDD 0x24 /* Cachable, qword load */
45#define ASI_QUEUE 0x25 /* (4V) Interrupt Queue Registers */
46#define ASI_QUAD_LDD_PHYS_4V 0x26 /* (4V) Physical, qword load */
35#define ASI_NUCLEUS_QUAD_LDD_L 0x2c /* Cachable, qword load, l-endian */ 47#define ASI_NUCLEUS_QUAD_LDD_L 0x2c /* Cachable, qword load, l-endian */
48#define ASI_QUAD_LDD_PHYS_L_4V 0x2e /* (4V) Phys, qword load, l-endian */
36#define ASI_PCACHE_DATA_STATUS 0x30 /* (III) PCache data stat RAM diag */ 49#define ASI_PCACHE_DATA_STATUS 0x30 /* (III) PCache data stat RAM diag */
37#define ASI_PCACHE_DATA 0x31 /* (III) PCache data RAM diag */ 50#define ASI_PCACHE_DATA 0x31 /* (III) PCache data RAM diag */
38#define ASI_PCACHE_TAG 0x32 /* (III) PCache tag RAM diag */ 51#define ASI_PCACHE_TAG 0x32 /* (III) PCache tag RAM diag */
@@ -137,6 +150,9 @@
137#define ASI_FL16_SL 0xdb /* Secondary, 1 16-bit, fpu ld/st,L*/ 150#define ASI_FL16_SL 0xdb /* Secondary, 1 16-bit, fpu ld/st,L*/
138#define ASI_BLK_COMMIT_P 0xe0 /* Primary, blk store commit */ 151#define ASI_BLK_COMMIT_P 0xe0 /* Primary, blk store commit */
139#define ASI_BLK_COMMIT_S 0xe1 /* Secondary, blk store commit */ 152#define ASI_BLK_COMMIT_S 0xe1 /* Secondary, blk store commit */
153#define ASI_BLK_INIT_QUAD_LDD_P 0xe2 /* (NG) init-store, twin load,
154 * primary, implicit
155 */
140#define ASI_BLK_P 0xf0 /* Primary, blk ld/st */ 156#define ASI_BLK_P 0xf0 /* Primary, blk ld/st */
141#define ASI_BLK_S 0xf1 /* Secondary, blk ld/st */ 157#define ASI_BLK_S 0xf1 /* Secondary, blk ld/st */
142#define ASI_BLK_PL 0xf8 /* Primary, blk ld/st, little */ 158#define ASI_BLK_PL 0xf8 /* Primary, blk ld/st, little */
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index 74de79dca915..c66a81bbc84d 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -1,41 +1,224 @@
1/* cpudata.h: Per-cpu parameters. 1/* cpudata.h: Per-cpu parameters.
2 * 2 *
3 * Copyright (C) 2003, 2005 David S. Miller (davem@redhat.com) 3 * Copyright (C) 2003, 2005, 2006 David S. Miller (davem@davemloft.net)
4 */ 4 */
5 5
6#ifndef _SPARC64_CPUDATA_H 6#ifndef _SPARC64_CPUDATA_H
7#define _SPARC64_CPUDATA_H 7#define _SPARC64_CPUDATA_H
8 8
9#include <asm/hypervisor.h>
10#include <asm/asi.h>
11
12#ifndef __ASSEMBLY__
13
9#include <linux/percpu.h> 14#include <linux/percpu.h>
15#include <linux/threads.h>
10 16
11typedef struct { 17typedef struct {
12 /* Dcache line 1 */ 18 /* Dcache line 1 */
13 unsigned int __softirq_pending; /* must be 1st, see rtrap.S */ 19 unsigned int __softirq_pending; /* must be 1st, see rtrap.S */
14 unsigned int multiplier; 20 unsigned int multiplier;
15 unsigned int counter; 21 unsigned int counter;
16 unsigned int idle_volume; 22 unsigned int __pad1;
17 unsigned long clock_tick; /* %tick's per second */ 23 unsigned long clock_tick; /* %tick's per second */
18 unsigned long udelay_val; 24 unsigned long udelay_val;
19 25
20 /* Dcache line 2 */ 26 /* Dcache line 2, rarely used */
21 unsigned int pgcache_size;
22 unsigned int __pad1;
23 unsigned long *pte_cache[2];
24 unsigned long *pgd_cache;
25
26 /* Dcache line 3, rarely used */
27 unsigned int dcache_size; 27 unsigned int dcache_size;
28 unsigned int dcache_line_size; 28 unsigned int dcache_line_size;
29 unsigned int icache_size; 29 unsigned int icache_size;
30 unsigned int icache_line_size; 30 unsigned int icache_line_size;
31 unsigned int ecache_size; 31 unsigned int ecache_size;
32 unsigned int ecache_line_size; 32 unsigned int ecache_line_size;
33 unsigned int __pad2;
34 unsigned int __pad3; 33 unsigned int __pad3;
34 unsigned int __pad4;
35} cpuinfo_sparc; 35} cpuinfo_sparc;
36 36
37DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data); 37DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
38#define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu)) 38#define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
39#define local_cpu_data() __get_cpu_var(__cpu_data) 39#define local_cpu_data() __get_cpu_var(__cpu_data)
40 40
41/* Trap handling code needs to get at a few critical values upon
42 * trap entry and to process TSB misses. These cannot be in the
43 * per_cpu() area as we really need to lock them into the TLB and
44 * thus make them part of the main kernel image. As a result we
45 * try to make this as small as possible.
46 *
47 * This is padded out and aligned to 64-bytes to avoid false sharing
48 * on SMP.
49 */
50
51/* If you modify the size of this structure, please update
52 * TRAP_BLOCK_SZ_SHIFT below.
53 */
54struct thread_info;
55struct trap_per_cpu {
56/* D-cache line 1: Basic thread information, cpu and device mondo queues */
57 struct thread_info *thread;
58 unsigned long pgd_paddr;
59 unsigned long cpu_mondo_pa;
60 unsigned long dev_mondo_pa;
61
62/* D-cache line 2: Error Mondo Queue and kernel buffer pointers */
63 unsigned long resum_mondo_pa;
64 unsigned long resum_kernel_buf_pa;
65 unsigned long nonresum_mondo_pa;
66 unsigned long nonresum_kernel_buf_pa;
67
68/* Dcache lines 3, 4, 5, and 6: Hypervisor Fault Status */
69 struct hv_fault_status fault_info;
70
71/* Dcache line 7: Physical addresses of CPU send mondo block and CPU list. */
72 unsigned long cpu_mondo_block_pa;
73 unsigned long cpu_list_pa;
74 unsigned long __pad1[2];
75
76/* Dcache line 8: Unused, needed to keep trap_block a power-of-2 in size. */
77 unsigned long __pad2[4];
78} __attribute__((aligned(64)));
79extern struct trap_per_cpu trap_block[NR_CPUS];
80extern void init_cur_cpu_trap(struct thread_info *);
81extern void setup_tba(void);
82
83struct cpuid_patch_entry {
84 unsigned int addr;
85 unsigned int cheetah_safari[4];
86 unsigned int cheetah_jbus[4];
87 unsigned int starfire[4];
88 unsigned int sun4v[4];
89};
90extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;
91
92struct sun4v_1insn_patch_entry {
93 unsigned int addr;
94 unsigned int insn;
95};
96extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch,
97 __sun4v_1insn_patch_end;
98
99struct sun4v_2insn_patch_entry {
100 unsigned int addr;
101 unsigned int insns[2];
102};
103extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
104 __sun4v_2insn_patch_end;
105
106#endif /* !(__ASSEMBLY__) */
107
108#define TRAP_PER_CPU_THREAD 0x00
109#define TRAP_PER_CPU_PGD_PADDR 0x08
110#define TRAP_PER_CPU_CPU_MONDO_PA 0x10
111#define TRAP_PER_CPU_DEV_MONDO_PA 0x18
112#define TRAP_PER_CPU_RESUM_MONDO_PA 0x20
113#define TRAP_PER_CPU_RESUM_KBUF_PA 0x28
114#define TRAP_PER_CPU_NONRESUM_MONDO_PA 0x30
115#define TRAP_PER_CPU_NONRESUM_KBUF_PA 0x38
116#define TRAP_PER_CPU_FAULT_INFO 0x40
117#define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA 0xc0
118#define TRAP_PER_CPU_CPU_LIST_PA 0xc8
119
120#define TRAP_BLOCK_SZ_SHIFT 8
121
122#include <asm/scratchpad.h>
123
124#define __GET_CPUID(REG) \
125 /* Spitfire implementation (default). */ \
126661: ldxa [%g0] ASI_UPA_CONFIG, REG; \
127 srlx REG, 17, REG; \
128 and REG, 0x1f, REG; \
129 nop; \
130 .section .cpuid_patch, "ax"; \
131 /* Instruction location. */ \
132 .word 661b; \
133 /* Cheetah Safari implementation. */ \
134 ldxa [%g0] ASI_SAFARI_CONFIG, REG; \
135 srlx REG, 17, REG; \
136 and REG, 0x3ff, REG; \
137 nop; \
138 /* Cheetah JBUS implementation. */ \
139 ldxa [%g0] ASI_JBUS_CONFIG, REG; \
140 srlx REG, 17, REG; \
141 and REG, 0x1f, REG; \
142 nop; \
143 /* Starfire implementation. */ \
144 sethi %hi(0x1fff40000d0 >> 9), REG; \
145 sllx REG, 9, REG; \
146 or REG, 0xd0, REG; \
147 lduwa [REG] ASI_PHYS_BYPASS_EC_E, REG;\
148 /* sun4v implementation. */ \
149 mov SCRATCHPAD_CPUID, REG; \
150 ldxa [REG] ASI_SCRATCHPAD, REG; \
151 nop; \
152 nop; \
153 .previous;
154
155#ifdef CONFIG_SMP
156
157#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
158 __GET_CPUID(TMP) \
159 sethi %hi(trap_block), DEST; \
160 sllx TMP, TRAP_BLOCK_SZ_SHIFT, TMP; \
161 or DEST, %lo(trap_block), DEST; \
162 add DEST, TMP, DEST; \
163
164/* Clobbers TMP, current address space PGD phys address into DEST. */
165#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \
166 TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
167 ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
168
169/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
170#define TRAP_LOAD_IRQ_WORK(DEST, TMP) \
171 __GET_CPUID(TMP) \
172 sethi %hi(__irq_work), DEST; \
173 sllx TMP, 6, TMP; \
174 or DEST, %lo(__irq_work), DEST; \
175 add DEST, TMP, DEST;
176
177/* Clobbers TMP, loads DEST with current thread info pointer. */
178#define TRAP_LOAD_THREAD_REG(DEST, TMP) \
179 TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
180 ldx [DEST + TRAP_PER_CPU_THREAD], DEST;
181
182/* Given the current thread info pointer in THR, load the per-cpu
183 * area base of the current processor into DEST. REG1, REG2, and REG3 are
184 * clobbered.
185 *
186 * You absolutely cannot use DEST as a temporary in this code. The
187 * reason is that traps can happen during execution, and return from
188 * trap will load the fully resolved DEST per-cpu base. This can corrupt
189 * the calculations done by the macro mid-stream.
190 */
191#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3) \
192 ldub [THR + TI_CPU], REG1; \
193 sethi %hi(__per_cpu_shift), REG3; \
194 sethi %hi(__per_cpu_base), REG2; \
195 ldx [REG3 + %lo(__per_cpu_shift)], REG3; \
196 ldx [REG2 + %lo(__per_cpu_base)], REG2; \
197 sllx REG1, REG3, REG3; \
198 add REG3, REG2, DEST;
199
200#else
201
202#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
203 sethi %hi(trap_block), DEST; \
204 or DEST, %lo(trap_block), DEST; \
205
206/* Uniprocessor versions, we know the cpuid is zero. */
207#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \
208 TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
209 ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
210
211#define TRAP_LOAD_IRQ_WORK(DEST, TMP) \
212 sethi %hi(__irq_work), DEST; \
213 or DEST, %lo(__irq_work), DEST;
214
215#define TRAP_LOAD_THREAD_REG(DEST, TMP) \
216 TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
217 ldx [DEST + TRAP_PER_CPU_THREAD], DEST;
218
219/* No per-cpu areas on uniprocessor, so no need to load DEST. */
220#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)
221
222#endif /* !(CONFIG_SMP) */
223
41#endif /* _SPARC64_CPUDATA_H */ 224#endif /* _SPARC64_CPUDATA_H */
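
The comment above asks that TRAP_BLOCK_SZ_SHIFT stay in sync with the structure size (the offsets listed reach 0x100, i.e. 256 bytes, matching a shift of 8). A hypothetical compile-time guard for that invariant, usable from any C file that includes this header (the helper name is an assumption):

#include <linux/kernel.h>	/* BUILD_BUG_ON() */

static inline void trap_block_size_check(void)
{
	BUILD_BUG_ON(sizeof(struct trap_per_cpu) !=
		     (1 << TRAP_BLOCK_SZ_SHIFT));
}
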
diff --git a/include/asm-sparc64/elf.h b/include/asm-sparc64/elf.h
index 69539a8ab833..303d85e2f82e 100644
--- a/include/asm-sparc64/elf.h
+++ b/include/asm-sparc64/elf.h
@@ -10,6 +10,7 @@
10#ifdef __KERNEL__ 10#ifdef __KERNEL__
11#include <asm/processor.h> 11#include <asm/processor.h>
12#include <asm/uaccess.h> 12#include <asm/uaccess.h>
13#include <asm/spitfire.h>
13#endif 14#endif
14 15
15/* 16/*
@@ -68,6 +69,7 @@
68#define HWCAP_SPARC_MULDIV 8 69#define HWCAP_SPARC_MULDIV 8
69#define HWCAP_SPARC_V9 16 70#define HWCAP_SPARC_V9 16
70#define HWCAP_SPARC_ULTRA3 32 71#define HWCAP_SPARC_ULTRA3 32
72#define HWCAP_SPARC_BLKINIT 64
71 73
72/* 74/*
73 * These are used to set parameters in the core dumps. 75 * These are used to set parameters in the core dumps.
@@ -145,11 +147,21 @@ typedef struct {
145 instruction set this cpu supports. */ 147 instruction set this cpu supports. */
146 148
147/* On Ultra, we support all of the v8 capabilities. */ 149/* On Ultra, we support all of the v8 capabilities. */
148#define ELF_HWCAP ((HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | \ 150static inline unsigned int sparc64_elf_hwcap(void)
149 HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV | \ 151{
150 HWCAP_SPARC_V9) | \ 152 unsigned int cap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
151 ((tlb_type == cheetah || tlb_type == cheetah_plus) ? \ 153 HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV |
152 HWCAP_SPARC_ULTRA3 : 0)) 154 HWCAP_SPARC_V9);
155
156 if (tlb_type == cheetah || tlb_type == cheetah_plus)
157 cap |= HWCAP_SPARC_ULTRA3;
158 else if (tlb_type == hypervisor)
159 cap |= HWCAP_SPARC_BLKINIT;
160
161 return cap;
162}
163
164#define ELF_HWCAP sparc64_elf_hwcap();
153 165
154/* This yields a string that ld.so will use to load implementation 166/* This yields a string that ld.so will use to load implementation
155 specific libraries for optimization. This is more specific in 167 specific libraries for optimization. This is more specific in
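
Userspace can test the new HWCAP_SPARC_BLKINIT bit via the ELF auxiliary vector; a hypothetical check using getauxval() (a later glibc helper, assumed here purely for illustration):

#include <sys/auxv.h>

if (getauxval(AT_HWCAP) & HWCAP_SPARC_BLKINIT)
	use_block_init_stores();	/* placeholder for a Niagara-optimized path */
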
diff --git a/include/asm-sparc64/head.h b/include/asm-sparc64/head.h
index 0abd3a674e8f..67960a751f4d 100644
--- a/include/asm-sparc64/head.h
+++ b/include/asm-sparc64/head.h
@@ -4,12 +4,21 @@
4 4
5#include <asm/pstate.h> 5#include <asm/pstate.h>
6 6
7 /* wrpr %g0, val, %gl */
8#define SET_GL(val) \
9 .word 0xa1902000 | val
10
11 /* rdpr %gl, %gN */
12#define GET_GL_GLOBAL(N) \
13 .word 0x81540000 | (N << 25)
14
7#define KERNBASE 0x400000 15#define KERNBASE 0x400000
8 16
9#define PTREGS_OFF (STACK_BIAS + STACKFRAME_SZ) 17#define PTREGS_OFF (STACK_BIAS + STACKFRAME_SZ)
10 18
11#define __CHEETAH_ID 0x003e0014 19#define __CHEETAH_ID 0x003e0014
12#define __JALAPENO_ID 0x003e0016 20#define __JALAPENO_ID 0x003e0016
21#define __SERRANO_ID 0x003e0022
13 22
14#define CHEETAH_MANUF 0x003e 23#define CHEETAH_MANUF 0x003e
15#define CHEETAH_IMPL 0x0014 /* Ultra-III */ 24#define CHEETAH_IMPL 0x0014 /* Ultra-III */
@@ -19,6 +28,12 @@
19#define PANTHER_IMPL 0x0019 /* Ultra-IV+ */ 28#define PANTHER_IMPL 0x0019 /* Ultra-IV+ */
20#define SERRANO_IMPL 0x0022 /* Ultra-IIIi+ */ 29#define SERRANO_IMPL 0x0022 /* Ultra-IIIi+ */
21 30
31#define BRANCH_IF_SUN4V(tmp1,label) \
32 sethi %hi(is_sun4v), %tmp1; \
33 lduw [%tmp1 + %lo(is_sun4v)], %tmp1; \
34 brnz,pn %tmp1, label; \
35 nop
36
22#define BRANCH_IF_CHEETAH_BASE(tmp1,tmp2,label) \ 37#define BRANCH_IF_CHEETAH_BASE(tmp1,tmp2,label) \
23 rdpr %ver, %tmp1; \ 38 rdpr %ver, %tmp1; \
24 sethi %hi(__CHEETAH_ID), %tmp2; \ 39 sethi %hi(__CHEETAH_ID), %tmp2; \
diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h
new file mode 100644
index 000000000000..612bf319753f
--- /dev/null
+++ b/include/asm-sparc64/hypervisor.h
@@ -0,0 +1,2128 @@
1#ifndef _SPARC64_HYPERVISOR_H
2#define _SPARC64_HYPERVISOR_H
3
4/* Sun4v hypervisor interfaces and defines.
5 *
6 * Hypervisor calls are made via traps to software traps number 0x80
7 * and above. Registers %o0 to %o5 serve as argument, status, and
8 * return value registers.
9 *
10 * There are two kinds of these traps. First there are the normal
11 * "fast traps" which use software trap 0x80 and encode the function
12 * to invoke by number in register %o5. Argument and return value
13 * handling is as follows:
14 *
15 * -----------------------------------------------
16 * | %o5 | function number | undefined |
17 * | %o0 | argument 0 | return status |
18 * | %o1 | argument 1 | return value 1 |
19 * | %o2 | argument 2 | return value 2 |
20 * | %o3 | argument 3 | return value 3 |
21 * | %o4 | argument 4 | return value 4 |
22 * -----------------------------------------------
23 *
24 * The second type are "hyper-fast traps" which encode the function
25 * number in the software trap number itself. So these use trap
26 * numbers > 0x80. The register usage for hyper-fast traps is as
27 * follows:
28 *
29 * -----------------------------------------------
30 * | %o0 | argument 0 | return status |
31 * | %o1 | argument 1 | return value 1 |
32 * | %o2 | argument 2 | return value 2 |
33 * | %o3 | argument 3 | return value 3 |
34 * | %o4 | argument 4 | return value 4 |
35 * -----------------------------------------------
36 *
37 * Registers providing explicit arguments to the hypervisor calls
38 * are volatile across the call. Upon return their values are
39 * undefined unless explicitly specified as containing a particular
40 * return value by the specific call. The return status is always
41 * returned in register %o0, zero indicates a successful execution of
42 * the hypervisor call and other values indicate an error status as
43 * defined below. So, for example, if a hyper-fast trap takes
44 * arguments 0, 1, and 2, then %o0, %o1, and %o2 are volatile across
45 * the call and %o3, %o4, and %o5 would be preserved.
46 *
47 * If the hypervisor trap is invalid, or the fast trap function number
48 * is invalid, HV_EBADTRAP will be returned in %o0. Also, all 64-bits
49 * of the argument and return values are significant.
50 */
51
52/* Trap numbers. */
53#define HV_FAST_TRAP 0x80
54#define HV_MMU_MAP_ADDR_TRAP 0x83
55#define HV_MMU_UNMAP_ADDR_TRAP 0x84
56#define HV_TTRACE_ADDENTRY_TRAP 0x85
57#define HV_CORE_TRAP 0xff
58
59/* Error codes. */
60#define HV_EOK 0 /* Successful return */
61#define HV_ENOCPU 1 /* Invalid CPU id */
62#define HV_ENORADDR 2 /* Invalid real address */
63#define HV_ENOINTR 3 /* Invalid interrupt id */
64#define HV_EBADPGSZ 4 /* Invalid pagesize encoding */
65#define HV_EBADTSB 5 /* Invalid TSB description */
66#define HV_EINVAL 6 /* Invalid argument */
67#define HV_EBADTRAP 7 /* Invalid function number */
68#define HV_EBADALIGN 8 /* Invalid address alignment */
69#define HV_EWOULDBLOCK 9 /* Cannot complete w/o blocking */
70#define HV_ENOACCESS 10 /* No access to resource */
71#define HV_EIO 11 /* I/O error */
72#define HV_ECPUERROR 12 /* CPU in error state */
73#define HV_ENOTSUPPORTED 13 /* Function not supported */
74#define HV_ENOMAP 14 /* No mapping found */
75#define HV_ETOOMANY 15 /* Too many items specified */
76
77/* mach_exit()
78 * TRAP: HV_FAST_TRAP
79 * FUNCTION: HV_FAST_MACH_EXIT
80 * ARG0: exit code
81 * ERRORS: This service does not return.
82 *
83 * Stop all CPUs in the virtual domain and place them into the stopped
84 * state. The 64-bit exit code may be passed to a service entity as
85 * the domain's exit status. On systems without a service entity, the
86 * domain will undergo a reset, and the boot firmware will be
87 * reloaded.
88 *
89 * This function will never return to the guest that invokes it.
90 *
91 * Note: By convention an exit code of zero denotes a successful exit by
92 * the guest code. A non-zero exit code denotes a guest specific
93 * error indication.
94 *
95 */
96#define HV_FAST_MACH_EXIT 0x00
97
98/* Domain services. */
99
100/* mach_desc()
101 * TRAP: HV_FAST_TRAP
102 * FUNCTION: HV_FAST_MACH_DESC
103 * ARG0: buffer
104 * ARG1: length
105 * RET0: status
106 * RET1: length
107 * ERRORS: HV_EBADALIGN Buffer is badly aligned
108 * HV_ENORADDR Buffer is to an illegal real address.
109 * HV_EINVAL Buffer length is too small for complete
110 * machine description.
111 *
112 * Copy the most current machine description into the buffer indicated
113 * by the real address in ARG0. The buffer provided must be 16 byte
114 * aligned. Upon success or HV_EINVAL, this service returns the
115 * actual size of the machine description in the RET1 return value.
116 *
117 * Note: A method of determining the appropriate buffer size for the
118 * machine description is to first call this service with a buffer
119 * length of 0 bytes.
120 */
121#define HV_FAST_MACH_DESC 0x01
122
123/* mach_sir()
124 * TRAP: HV_FAST_TRAP
125 * FUNCTION: HV_FAST_MACH_SIR
126 * ERRORS: This service does not return.
127 *
128 * Perform a software initiated reset of the virtual machine domain.
129 * All CPUs are captured as soon as possible, all hardware devices are
130 * returned to the entry default state, and the domain is restarted at
131 * the SIR (trap type 0x04) real trap table (RTBA) entry point on one
132 * of the CPUs. The single CPU restarted is selected as determined by
133 * platform specific policy. Memory is preserved across this
134 * operation.
135 */
136#define HV_FAST_MACH_SIR 0x02
137
138/* mach_set_soft_state()
139 * TRAP: HV_FAST_TRAP
140 * FUNCTION: HV_FAST_MACH_SET_SOFT_STATE
141 * ARG0: software state
142 * ARG1: software state description pointer
143 * RET0: status
144 * ERRORS: EINVAL software state not valid or software state
145 * description is not NULL terminated
146 * ENORADDR software state description pointer is not a
147 * valid real address
148 * EBADALIGN software state description is not correctly
149 * aligned
150 *
151 * This allows the guest to report it's soft state to the hypervisor. There
152 * are two primary components to this state. The first part states whether
153 * the guest software is running or not. The second containts optional
154 * details specific to the software.
155 *
156 * The software state argument is defined below in HV_SOFT_STATE_*, and
157 * indicates whether the guest is operating normally or in a transitional
158 * state.
159 *
160 * The software state description argument is a real address of a data buffer
161 * of size 32-bytes aligned on a 32-byte boundary. It is treated as a NULL
162 * terminated 7-bit ASCII string of up to 31 characters not including the
163 * NULL termination.
164 */
165#define HV_FAST_MACH_SET_SOFT_STATE 0x03
166#define HV_SOFT_STATE_NORMAL 0x01
167#define HV_SOFT_STATE_TRANSITION 0x02
168
169/* mach_get_soft_state()
170 * TRAP: HV_FAST_TRAP
171 * FUNCTION: HV_FAST_MACH_GET_SOFT_STATE
172 * ARG0: software state description pointer
173 * RET0: status
174 * RET1: software state
175 * ERRORS: ENORADDR software state description pointer is not a
176 * valid real address
177 * EBADALIGN software state description is not correctly
178 * aligned
179 *
180 * Retrieve the current value of the guest's software state. The rules
181 * for the software state pointer are the same as for mach_set_soft_state()
182 * above.
183 */
184#define HV_FAST_MACH_GET_SOFT_STATE 0x04
185
186/* CPU services.
187 *
188 * CPUs represent devices that can execute software threads. A single
189 * chip that contains multiple cores or strands is represented as
190 * multiple CPUs with unique CPU identifiers. CPUs are exported to
191 * OBP via the machine description (and to the OS via the OBP device
192 * tree). CPUs are always in one of three states: stopped, running,
193 * or error.
194 *
195 * A CPU ID is a pre-assigned 16-bit value that uniquely identifies a
196 * CPU within a logical domain. Operations that are to be performed
197 * on multiple CPUs specify them via a CPU list. A CPU list is an
198 * array in real memory, of which each 16-bit word is a CPU ID. CPU
199 * lists are passed through the API as two arguments. The first is
200 * the number of entries (16-bit words) in the CPU list, and the
201 * second is the (real address) pointer to the CPU ID list.
202 */
203
204/* cpu_start()
205 * TRAP: HV_FAST_TRAP
206 * FUNCTION: HV_FAST_CPU_START
207 * ARG0: CPU ID
208 * ARG1: PC
209 * ARG2: RTBA
210 * ARG3: target ARG0
211 * RET0: status
212 * ERRORS: ENOCPU Invalid CPU ID
213 * EINVAL Target CPU ID is not in the stopped state
214 * ENORADDR Invalid PC or RTBA real address
215 * EBADALIGN Unaligned PC or unaligned RTBA
216 * EWOULDBLOCK Starting resources are not available
217 *
218 * Start CPU with given CPU ID with PC in %pc and with a real trap
219 * base address value of RTBA. The indicated CPU must be in the
220 * stopped state. The supplied RTBA must be aligned on a 256 byte
221 * boundary. On successful completion, the specified CPU will be in
222 * the running state and will be supplied with "target ARG0" in %o0
223 * and RTBA in %tba.
224 */
225#define HV_FAST_CPU_START 0x10
226
227/* cpu_stop()
228 * TRAP: HV_FAST_TRAP
229 * FUNCTION: HV_FAST_CPU_STOP
230 * ARG0: CPU ID
231 * RET0: status
232 * ERRORS: ENOCPU Invalid CPU ID
233 * EINVAL Target CPU ID is the current cpu
234 * EINVAL Target CPU ID is not in the running state
235 * EWOULDBLOCK Stopping resources are not available
236 * ENOTSUPPORTED Not supported on this platform
237 *
238 * The specified CPU is stopped. The indicated CPU must be in the
239 * running state. On completion, it will be in the stopped state. It
240 * is not legal to stop the current CPU.
241 *
242 * Note: As this service cannot be used to stop the current cpu, this service
243 * may not be used to stop the last running CPU in a domain. To stop
244 * and exit a running domain, a guest must use the mach_exit() service.
245 */
246#define HV_FAST_CPU_STOP 0x11
247
248/* cpu_yield()
249 * TRAP: HV_FAST_TRAP
250 * FUNCTION: HV_FAST_CPU_YIELD
251 * RET0: status
252 * ERRORS: No possible error.
253 *
254 * Suspend execution on the current CPU. Execution will resume when
255 * an interrupt (device, %stick_compare, or cross-call) is targeted to
256 * the CPU. On some CPUs, this API may be used by the hypervisor to
257 * save power by disabling hardware strands.
258 */
259#define HV_FAST_CPU_YIELD 0x12
260
261#ifndef __ASSEMBLY__
262extern unsigned long sun4v_cpu_yield(void);
263#endif
264
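
A sun4v guest can use this call to park the current strand until the next interrupt arrives. A hypothetical idle-loop fragment using the wrapper declared above:

while (!need_resched())
	sun4v_cpu_yield();	/* resumes on device, %stick_compare or cross-call interrupt */
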
265/* cpu_qconf()
266 * TRAP: HV_FAST_TRAP
267 * FUNCTION: HV_FAST_CPU_QCONF
268 * ARG0: queue
269 * ARG1: base real address
270 * ARG2: number of entries
271 * RET0: status
272 * ERRORS: ENORADDR Invalid base real address
273 * EINVAL Invalid queue or number of entries is less
274 * than 2 or too large.
275 * EBADALIGN Base real address is not correctly aligned
276 * for size.
277 *
278 * Configure the given queue to be placed at the given base real
279 * address, with the given number of entries. The number of entries
280 * must be a power of 2. The base real address must be aligned
281 * exactly to match the queue size. Each queue entry is 64 bytes
282 * long, so for example a 32 entry queue must be aligned on a 2048
283 * byte real address boundary.
284 *
285 * The specified queue is unconfigured if the number of entries is given
286 * as zero.
287 *
288 * For the current version of this API service, the argument queue is defined
289 * as follows:
290 *
291 * queue description
292 * ----- -------------------------
293 * 0x3c cpu mondo queue
294 * 0x3d device mondo queue
295 * 0x3e resumable error queue
296 * 0x3f non-resumable error queue
297 *
298 * Note: The maximum number of entries for each queue for a specific cpu may
299 * be determined from the machine description.
300 */
301#define HV_FAST_CPU_QCONF 0x14
302#define HV_CPU_QUEUE_CPU_MONDO 0x3c
303#define HV_CPU_QUEUE_DEVICE_MONDO 0x3d
304#define HV_CPU_QUEUE_RES_ERROR 0x3e
305#define HV_CPU_QUEUE_NONRES_ERROR 0x3f
306
307#ifndef __ASSEMBLY__
308extern unsigned long sun4v_cpu_qconf(unsigned long type,
309 unsigned long queue_paddr,
310 unsigned long num_queue_entries);
311#endif
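
#ifndef __ASSEMBLY__
/* Illustrative sketch, assuming the queue buffers have already been
 * allocated with power-of-2 entry counts and the size-matched alignment
 * required above, and that a zero status means success.  The helper name
 * and its arguments are examples, not part of this API.
 */
static inline unsigned long example_register_cpu_queues(unsigned long cpu_mondo_pa,
							unsigned long dev_mondo_pa,
							unsigned long res_err_pa,
							unsigned long nonres_err_pa,
							unsigned long nentries)
{
	unsigned long hv_err;

	hv_err = sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO, cpu_mondo_pa, nentries);
	if (hv_err)
		return hv_err;
	hv_err = sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO, dev_mondo_pa, nentries);
	if (hv_err)
		return hv_err;
	hv_err = sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR, res_err_pa, nentries);
	if (hv_err)
		return hv_err;
	return sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR, nonres_err_pa, nentries);
}
#endif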
312
313/* cpu_qinfo()
314 * TRAP: HV_FAST_TRAP
315 * FUNCTION: HV_FAST_CPU_QINFO
316 * ARG0: queue
317 * RET0: status
318 * RET1: base real address
319 * RET1: number of entries
320 * ERRORS: EINVAL Invalid queue
321 *
322 * Return the configuration info for the given queue. The base real
323 * address and number of entries of the defined queue are returned.
324 * The queue argument values are the same as for cpu_qconf() above.
325 *
326 * If the specified queue is a valid queue number, but no queue has
327 * been defined, the number of entries will be set to zero and the
328 * base real address returned is undefined.
329 */
330#define HV_FAST_CPU_QINFO 0x15
331
332/* cpu_mondo_send()
333 * TRAP: HV_FAST_TRAP
334 * FUNCTION: HV_FAST_CPU_MONDO_SEND
335 * ARG0-1: CPU list
336 * ARG2: data real address
337 * RET0: status
338 * ERRORS: EBADALIGN Mondo data is not 64-byte aligned or CPU list
339 * is not 2-byte aligned.
340 * ENORADDR Invalid data mondo address, or invalid cpu list
341 * address.
342 * ENOCPU Invalid cpu in CPU list
343 * EWOULDBLOCK Some or all of the listed CPUs did not receive
344 * the mondo
345 * ECPUERROR One or more of the listed CPUs are in error
346 * state, use HV_FAST_CPU_STATE to see which ones
347 * EINVAL CPU list includes caller's CPU ID
348 *
349 * Send a mondo interrupt to the CPUs in the given CPU list with the
350 * 64-bytes at the given data real address. The data must be 64-byte
351 * aligned. The mondo data will be delivered to the cpu_mondo queues
352 * of the recipient CPUs.
353 *
354 * In all cases, error or not, the CPUs in the CPU list to which the
355 * mondo has been successfully delivered will be indicated by having
356 * their entry in the CPU list updated with the value 0xffff.
357 */
358#define HV_FAST_CPU_MONDO_SEND 0x42
359
360#ifndef __ASSEMBLY__
361extern unsigned long sun4v_cpu_mondo_send(unsigned long cpu_count, unsigned long cpu_list_pa, unsigned long mondo_block_pa);
362#endif
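
#ifndef __ASSEMBLY__
/* Illustrative sketch only: retry a mondo send while the hypervisor
 * reports that delivery resources are unavailable.  HV_EWOULDBLOCK is
 * assumed to be the status constant defined elsewhere in this header;
 * a production caller would also consult the 0xffff markers written
 * into the CPU list (see above) before retrying.
 */
static inline unsigned long example_mondo_send(unsigned long cpu_count,
					       unsigned long cpu_list_pa,
					       unsigned long mondo_block_pa)
{
	unsigned long hv_err;

	do {
		hv_err = sun4v_cpu_mondo_send(cpu_count, cpu_list_pa,
					      mondo_block_pa);
	} while (hv_err == HV_EWOULDBLOCK);

	return hv_err;
}
#endif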
363
364/* cpu_myid()
365 * TRAP: HV_FAST_TRAP
366 * FUNCTION: HV_FAST_CPU_MYID
367 * RET0: status
368 * RET1: CPU ID
369 * ERRORS: No errors defined.
370 *
371 * Return the hypervisor ID handle for the current CPU. Used by a
372 * virtual CPU to discover its own identity.
373 */
374#define HV_FAST_CPU_MYID 0x16
375
376/* cpu_state()
377 * TRAP: HV_FAST_TRAP
378 * FUNCTION: HV_FAST_CPU_STATE
379 * ARG0: CPU ID
380 * RET0: status
381 * RET1: state
382 * ERRORS: ENOCPU Invalid CPU ID
383 *
384 * Retrieve the current state of the CPU with the given CPU ID.
385 */
386#define HV_FAST_CPU_STATE 0x17
387#define HV_CPU_STATE_STOPPED 0x01
388#define HV_CPU_STATE_RUNNING 0x02
389#define HV_CPU_STATE_ERROR 0x03
390
391#ifndef __ASSEMBLY__
392extern long sun4v_cpu_state(unsigned long cpuid);
393#endif
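
#ifndef __ASSEMBLY__
/* Illustrative sketch only: after requesting cpu_start(), a caller might
 * poll cpu_state() until the target CPU is running.  Treating negative
 * values from sun4v_cpu_state() as errors is an assumption of this
 * example, not something this header specifies.
 */
static inline long example_wait_cpu_running(unsigned long cpuid)
{
	long state;

	do {
		state = sun4v_cpu_state(cpuid);
		if (state < 0)
			return state;	/* e.g. invalid CPU ID */
	} while (state != HV_CPU_STATE_RUNNING);

	return state;
}
#endif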
394
395/* cpu_set_rtba()
396 * TRAP: HV_FAST_TRAP
397 * FUNCTION: HV_FAST_CPU_SET_RTBA
398 * ARG0: RTBA
399 * RET0: status
400 * RET1: previous RTBA
401 * ERRORS: ENORADDR Invalid RTBA real address
402 * EBADALIGN RTBA is incorrectly aligned for a trap table
403 *
404 * Set the real trap base address of the local cpu to the given RTBA.
405 * The supplied RTBA must be aligned on a 256 byte boundary. Upon
406 * success the previous value of the RTBA is returned in RET1.
407 *
408 * Note: This service does not affect %tba
409 */
410#define HV_FAST_CPU_SET_RTBA 0x18
411
412/* cpu_get_rtba()
413 * TRAP: HV_FAST_TRAP
414 * FUNCTION: HV_FAST_CPU_GET_RTBA
415 * RET0: status
416 * RET1: current RTBA
417 * ERRORS: No possible error.
418 *
419 * Returns the current value of RTBA in RET1.
420 */
421#define HV_FAST_CPU_GET_RTBA 0x19
422
423/* MMU services.
424 *
425 * Layout of a TSB description for mmu_tsb_ctx{,non}0() calls.
426 */
427#ifndef __ASSEMBLY__
428struct hv_tsb_descr {
429 unsigned short pgsz_idx;
430 unsigned short assoc;
431 unsigned int num_ttes; /* in TTEs */
432 unsigned int ctx_idx;
433 unsigned int pgsz_mask;
434 unsigned long tsb_base;
435 unsigned long resv;
436};
437#endif
438#define HV_TSB_DESCR_PGSZ_IDX_OFFSET 0x00
439#define HV_TSB_DESCR_ASSOC_OFFSET 0x02
440#define HV_TSB_DESCR_NUM_TTES_OFFSET 0x04
441#define HV_TSB_DESCR_CTX_IDX_OFFSET 0x08
442#define HV_TSB_DESCR_PGSZ_MASK_OFFSET 0x0c
443#define HV_TSB_DESCR_TSB_BASE_OFFSET 0x10
444#define HV_TSB_DESCR_RESV_OFFSET 0x18
445
446/* Page size bitmask. */
447#define HV_PGSZ_MASK_8K (1 << 0)
448#define HV_PGSZ_MASK_64K (1 << 1)
449#define HV_PGSZ_MASK_512K (1 << 2)
450#define HV_PGSZ_MASK_4MB (1 << 3)
451#define HV_PGSZ_MASK_32MB (1 << 4)
452#define HV_PGSZ_MASK_256MB (1 << 5)
453#define HV_PGSZ_MASK_2GB (1 << 6)
454#define HV_PGSZ_MASK_16GB (1 << 7)
455
456/* Page size index. The value given in the TSB descriptor must correspond
457 * to the smallest page size specified in the pgsz_mask page size bitmask.
458 */
459#define HV_PGSZ_IDX_8K 0
460#define HV_PGSZ_IDX_64K 1
461#define HV_PGSZ_IDX_512K 2
462#define HV_PGSZ_IDX_4MB 3
463#define HV_PGSZ_IDX_32MB 4
464#define HV_PGSZ_IDX_256MB 5
465#define HV_PGSZ_IDX_2GB 6
466#define HV_PGSZ_IDX_16GB 7
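
#ifndef __ASSEMBLY__
/* Illustrative sketch only: fill in a TSB descriptor for a single
 * direct-mapped TSB of 8K pages.  Per the rule above, pgsz_idx names the
 * smallest page size set in pgsz_mask.  The real address 'tsb_ra', the
 * entry count and the use of context index 0 are example values.
 */
static inline void example_fill_tsb_descr(struct hv_tsb_descr *d,
					  unsigned long tsb_ra,
					  unsigned int num_ttes)
{
	d->pgsz_idx = HV_PGSZ_IDX_8K;
	d->assoc = 1;			/* direct mapped */
	d->num_ttes = num_ttes;
	d->ctx_idx = 0;
	d->pgsz_mask = HV_PGSZ_MASK_8K;
	d->tsb_base = tsb_ra;
	d->resv = 0;
}
#endif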
467
468/* MMU fault status area.
469 *
470 * MMU related faults have their status and fault address information
471 * placed into a memory region made available by privileged code. Each
472 * virtual processor must make a mmu_fault_area_conf() call to tell the
473 * hypervisor where that processor's fault status should be stored.
474 *
475 * The fault status block is a multiple of 64-bytes and must be aligned
476 * on a 64-byte boundary.
477 */
478#ifndef __ASSEMBLY__
479struct hv_fault_status {
480 unsigned long i_fault_type;
481 unsigned long i_fault_addr;
482 unsigned long i_fault_ctx;
483 unsigned long i_reserved[5];
484 unsigned long d_fault_type;
485 unsigned long d_fault_addr;
486 unsigned long d_fault_ctx;
487 unsigned long d_reserved[5];
488};
489#endif
490#define HV_FAULT_I_TYPE_OFFSET 0x00
491#define HV_FAULT_I_ADDR_OFFSET 0x08
492#define HV_FAULT_I_CTX_OFFSET 0x10
493#define HV_FAULT_D_TYPE_OFFSET 0x40
494#define HV_FAULT_D_ADDR_OFFSET 0x48
495#define HV_FAULT_D_CTX_OFFSET 0x50
496
497#define HV_FAULT_TYPE_FAST_MISS 1
498#define HV_FAULT_TYPE_FAST_PROT 2
499#define HV_FAULT_TYPE_MMU_MISS 3
500#define HV_FAULT_TYPE_INV_RA 4
501#define HV_FAULT_TYPE_PRIV_VIOL 5
502#define HV_FAULT_TYPE_PROT_VIOL 6
503#define HV_FAULT_TYPE_NFO 7
504#define HV_FAULT_TYPE_NFO_SEFF 8
505#define HV_FAULT_TYPE_INV_VA 9
506#define HV_FAULT_TYPE_INV_ASI 10
507#define HV_FAULT_TYPE_NC_ATOMIC 11
508#define HV_FAULT_TYPE_PRIV_ACT 12
509#define HV_FAULT_TYPE_RESV1 13
510#define HV_FAULT_TYPE_UNALIGNED 14
511#define HV_FAULT_TYPE_INV_PGSZ 15
512/* Values 16 --> -2 are reserved. */
513#define HV_FAULT_TYPE_MULTIPLE -1
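
#ifndef __ASSEMBLY__
/* Illustrative sketch only: given the per-cpu fault status area
 * registered with mmu_fault_area_conf() (described below), a data-MMU
 * trap handler could extract the fault type, address and context like
 * this.  How the area pointer is tracked per cpu is outside this header.
 */
static inline void example_read_data_fault(const struct hv_fault_status *fsa,
					   unsigned long *type,
					   unsigned long *addr,
					   unsigned long *ctx)
{
	*type = fsa->d_fault_type;	/* one of HV_FAULT_TYPE_* */
	*addr = fsa->d_fault_addr;
	*ctx  = fsa->d_fault_ctx;
}
#endif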
514
515/* Flags argument for mmu_{map,unmap}_addr(), mmu_demap_{page,context,all}(),
516 * and mmu_{map,unmap}_perm_addr().
517 */
518#define HV_MMU_DMMU 0x01
519#define HV_MMU_IMMU 0x02
520#define HV_MMU_ALL (HV_MMU_DMMU | HV_MMU_IMMU)
521
522/* mmu_map_addr()
523 * TRAP: HV_MMU_MAP_ADDR_TRAP
524 * ARG0: virtual address
525 * ARG1: mmu context
526 * ARG2: TTE
527 * ARG3: flags (HV_MMU_{IMMU,DMMU})
528 * ERRORS: EINVAL Invalid virtual address, mmu context, or flags
529 * EBADPGSZ Invalid page size value
530 * ENORADDR Invalid real address in TTE
531 *
532 * Create a non-permanent mapping using the given TTE, virtual
533 * address, and mmu context. The flags argument determines which
534 * (data, or instruction, or both) TLB the mapping gets loaded into.
535 *
536 * The behavior is undefined if the valid bit is clear in the TTE.
537 *
538 * Note: This API call is for privileged code to specify temporary translation
539 * mappings without the need to create and manage a TSB.
540 */
541
542/* mmu_unmap_addr()
543 * TRAP: HV_MMU_UNMAP_ADDR_TRAP
544 * ARG0: virtual address
545 * ARG1: mmu context
546 * ARG2: flags (HV_MMU_{IMMU,DMMU})
547 * ERRORS: EINVAL Invalid virtual address, mmu context, or flags
548 *
549 * Demaps the given virtual address in the given mmu context on this
550 * CPU. This function is intended to be used to demap pages mapped
551 * with mmu_map_addr. This service is equivalent to invoking
552 * mmu_demap_page() with only the current CPU in the CPU list. The
553 * flags argument determines which (data, or instruction, or both) TLB
554 * the mapping gets unmapped from.
555 *
556 * Attempting to perform an unmap operation for a previously defined
557 * permanent mapping will have undefined results.
558 */
559
560/* mmu_tsb_ctx0()
561 * TRAP: HV_FAST_TRAP
562 * FUNCTION: HV_FAST_MMU_TSB_CTX0
563 * ARG0: number of TSB descriptions
564 * ARG1: TSB descriptions pointer
565 * RET0: status
566 * ERRORS: ENORADDR Invalid TSB descriptions pointer or
567 * TSB base within a descriptor
568 * EBADALIGN TSB descriptions pointer is not aligned
569 * to an 8-byte boundary, or TSB base
570 * within a descriptor is not aligned for
571 * the given TSB size
572 * EBADPGSZ Invalid page size in a TSB descriptor
573 * EBADTSB Invalid associativity or size in a TSB
574 * descriptor
575 * EINVAL Invalid number of TSB descriptions, or
576 * invalid context index in a TSB
577 * descriptor, or index page size not
578 * equal to smallest page size in page
579 * size bitmask field.
580 *
581 * Configures the TSBs for the current CPU for virtual addresses with
582 * context zero. The TSB descriptions pointer is a pointer to an
583 * array of the given number of TSB descriptions.
584 *
585 * Note: The maximum number of TSBs available to a virtual CPU is given by the
586 * mmu-max-#tsbs property of the cpu's corresponding "cpu" node in the
587 * machine description.
588 */
589#define HV_FAST_MMU_TSB_CTX0 0x20
590
591/* mmu_tsb_ctxnon0()
592 * TRAP: HV_FAST_TRAP
593 * FUNCTION: HV_FAST_MMU_TSB_CTXNON0
594 * ARG0: number of TSB descriptions
595 * ARG1: TSB descriptions pointer
596 * RET0: status
597 * ERRORS: Same as for mmu_tsb_ctx0() above.
598 *
599 * Configures the TSBs for the current CPU for virtual addresses with
600 * non-zero contexts. The TSB descriptions pointer is a pointer to an
601 * array of the given number of TSB descriptions.
602 *
603 * Note: A maximum of 16 TSBs may be specified in the TSB description list.
604 */
605#define HV_FAST_MMU_TSB_CTXNON0 0x21
606
607/* mmu_demap_page()
608 * TRAP: HV_FAST_TRAP
609 * FUNCTION: HV_FAST_MMU_DEMAP_PAGE
610 * ARG0: reserved, must be zero
611 * ARG1: reserved, must be zero
612 * ARG2: virtual address
613 * ARG3: mmu context
614 * ARG4: flags (HV_MMU_{IMMU,DMMU})
615 * RET0: status
616 * ERRORS: EINVAL Invalid virtual address, context, or
617 * flags value
618 * ENOTSUPPORTED ARG0 or ARG1 is non-zero
619 *
620 * Demaps any page mapping of the given virtual address in the given
621 * mmu context for the current virtual CPU. Any virtually tagged
622 * caches are guaranteed to be kept consistent. The flags argument
623 * determines which TLB (instruction, or data, or both) participate in
624 * the operation.
625 *
626 * ARG0 and ARG1 are both reserved and must be set to zero.
627 */
628#define HV_FAST_MMU_DEMAP_PAGE 0x22
629
630/* mmu_demap_ctx()
631 * TRAP: HV_FAST_TRAP
632 * FUNCTION: HV_FAST_MMU_DEMAP_CTX
633 * ARG0: reserved, must be zero
634 * ARG1: reserved, must be zero
635 * ARG2: mmu context
636 * ARG3: flags (HV_MMU_{IMMU,DMMU})
637 * RET0: status
638 * ERRORS: EINVAL Invalid context or flags value
639 * ENOTSUPPORTED ARG0 or ARG1 is non-zero
640 *
641 * Demaps all non-permanent virtual page mappings previously specified
642 * for the given context for the current virtual CPU. Any virtual
643 * tagged caches are guaranteed to be kept consistent. The flags
644 * argument determines which TLB (instruction, or data, or both)
645 * participate in the operation.
646 *
647 * ARG0 and ARG1 are both reserved and must be set to zero.
648 */
649#define HV_FAST_MMU_DEMAP_CTX 0x23
650
651/* mmu_demap_all()
652 * TRAP: HV_FAST_TRAP
653 * FUNCTION: HV_FAST_MMU_DEMAP_ALL
654 * ARG0: reserved, must be zero
655 * ARG1: reserved, must be zero
656 * ARG2: flags (HV_MMU_{IMMU,DMMU})
657 * RET0: status
658 * ERRORS: EINVAL Invalid flags value
659 * ENOTSUPPORTED ARG0 or ARG1 is non-zero
660 *
661 * Demaps all non-permanent virtual page mappings previously specified
662 * for the current virtual CPU. Any virtual tagged caches are
663 * guaranteed to be kept consistent. The flags argument determines
664 * which TLB (instruction, or data, or both) participate in the
665 * operation.
666 *
667 * ARG0 and ARG1 are both reserved and must be set to zero.
668 */
669#define HV_FAST_MMU_DEMAP_ALL 0x24
670
671/* mmu_map_perm_addr()
672 * TRAP: HV_FAST_TRAP
673 * FUNCTION: HV_FAST_MMU_MAP_PERM_ADDR
674 * ARG0: virtual address
675 * ARG1: reserved, must be zero
676 * ARG2: TTE
677 * ARG3: flags (HV_MMU_{IMMU,DMMU})
678 * RET0: status
679 * ERRORS: EINVAL Invalid virtual address or flags value
680 * EBADPGSZ Invalid page size value
681 * ENORADDR Invalid real address in TTE
682 * ETOOMANY Too many mappings (max of 8 reached)
683 *
684 * Create a permanent mapping using the given TTE and virtual address
685 * for context 0 on the calling virtual CPU. A maximum of 8 such
686 * permanent mappings may be specified by privileged code. Mappings
687 * may be removed with mmu_unmap_perm_addr().
688 *
689 * The behavior is undefined if a TTE with the valid bit clear is given.
690 *
691 * Note: This call is used to specify address space mappings for which
692 * privileged code does not expect to receive misses. For example,
693 * this mechanism can be used to map kernel nucleus code and data.
694 */
695#define HV_FAST_MMU_MAP_PERM_ADDR 0x25
696
697/* mmu_fault_area_conf()
698 * TRAP: HV_FAST_TRAP
699 * FUNCTION: HV_FAST_MMU_FAULT_AREA_CONF
700 * ARG0: real address
701 * RET0: status
702 * RET1: previous mmu fault area real address
703 * ERRORS: ENORADDR Invalid real address
704 * EBADALIGN Invalid alignment for fault area
705 *
706 * Configure the MMU fault status area for the calling CPU. A 64-byte
707 * aligned real address specifies where MMU fault status information
708 * is placed. The return value is the previously specified area, or 0
709 * for the first invocation. Specifying a fault area at real address
710 * 0 is not allowed.
711 */
712#define HV_FAST_MMU_FAULT_AREA_CONF 0x26
713
714/* mmu_enable()
715 * TRAP: HV_FAST_TRAP
716 * FUNCTION: HV_FAST_MMU_ENABLE
717 * ARG0: enable flag
718 * ARG1: return target address
719 * RET0: status
720 * ERRORS: ENORADDR Invalid real address when disabling
721 * translation.
722 * EBADALIGN The return target address is not
723 * aligned to an instruction.
724 * EINVAL The enable flag requests the current
725 * operating mode (e.g. disable if already
726 * disabled)
727 *
728 * Enable or disable virtual address translation for the calling CPU
729 * within the virtual machine domain. If the enable flag is zero,
730 * translation is disabled; any non-zero value will enable
731 * translation.
732 *
733 * When this function returns, the newly selected translation mode
734 * will be active. If the mmu is being enabled, then the return
735 * target address is a virtual address; otherwise it is a real address.
736 *
737 * Upon successful completion, control will be returned to the given
738 * return target address (ie. the cpu will jump to that address). On
739 * failure, the previous mmu mode remains and the trap simply returns
740 * as normal with the appropriate error code in RET0.
741 */
742#define HV_FAST_MMU_ENABLE 0x27
743
744/* mmu_unmap_perm_addr()
745 * TRAP: HV_FAST_TRAP
746 * FUNCTION: HV_FAST_MMU_UNMAP_PERM_ADDR
747 * ARG0: virtual address
748 * ARG1: reserved, must be zero
749 * ARG2: flags (HV_MMU_{IMMU,DMMU})
750 * RET0: status
751 * ERRORS: EINVAL Invalid virtual address or flags value
752 * ENOMAP Specified mapping was not found
753 *
754 * Demaps any permanent page mapping (established via
755 * mmu_map_perm_addr()) at the given virtual address for context 0 on
756 * the current virtual CPU. Any virtual tagged caches are guaranteed
757 * to be kept consistent.
758 */
759#define HV_FAST_MMU_UNMAP_PERM_ADDR 0x28
760
761/* mmu_tsb_ctx0_info()
762 * TRAP: HV_FAST_TRAP
763 * FUNCTION: HV_FAST_MMU_TSB_CTX0_INFO
764 * ARG0: max TSBs
765 * ARG1: buffer pointer
766 * RET0: status
767 * RET1: number of TSBs
768 * ERRORS: EINVAL Supplied buffer is too small
769 * EBADALIGN The buffer pointer is badly aligned
770 * ENORADDR Invalid real address for buffer pointer
771 *
772 * Return the TSB configuration as previously defined by mmu_tsb_ctx0()
773 * into the provided buffer. The size of the buffer is given in ARG0
774 * in terms of the number of TSB description entries.
775 *
776 * Upon return, RET1 always contains the number of TSB descriptions
777 * previously configured. If zero TSBs were configured, EOK is
778 * returned with RET1 containing 0.
779 */
780#define HV_FAST_MMU_TSB_CTX0_INFO 0x29
781
782/* mmu_tsb_ctxnon0_info()
783 * TRAP: HV_FAST_TRAP
784 * FUNCTION: HV_FAST_MMU_TSB_CTXNON0_INFO
785 * ARG0: max TSBs
786 * ARG1: buffer pointer
787 * RET0: status
788 * RET1: number of TSBs
789 * ERRORS: EINVAL Supplied buffer is too small
790 * EBADALIGN The buffer pointer is badly aligned
791 * ENORADDR Invalid real address for buffer pointer
792 *
793 * Return the TSB configuration as previously defined by
794 * mmu_tsb_ctxnon0() into the provided buffer. The size of the buffer
795 * is given in ARG0 in terms of the number of TSB description entries.
796 *
797 * Upon return, RET1 always contains the number of TSB descriptions
798 * previously configured. If zero TSBs were configured, EOK is
799 * returned with RET1 containing 0.
800 */
801#define HV_FAST_MMU_TSB_CTXNON0_INFO 0x2a
802
803/* mmu_fault_area_info()
804 * TRAP: HV_FAST_TRAP
805 * FUNCTION: HV_FAST_MMU_FAULT_AREA_INFO
806 * RET0: status
807 * RET1: fault area real address
808 * ERRORS: No errors defined.
809 *
810 * Return the currently defined MMU fault status area for the current
811 * CPU. The real address of the fault status area is returned in
812 * RET1, or 0 is returned in RET1 if no fault status area is defined.
813 *
814 * Note: mmu_fault_area_conf() may be called with the return value (RET1)
815 * from this service if there is a need to save and restore the fault
816 * area for a cpu.
817 */
818#define HV_FAST_MMU_FAULT_AREA_INFO 0x2b
819
820/* Cache and Memory services. */
821
822/* mem_scrub()
823 * TRAP: HV_FAST_TRAP
824 * FUNCTION: HV_FAST_MEM_SCRUB
825 * ARG0: real address
826 * ARG1: length
827 * RET0: status
828 * RET1: length scrubbed
829 * ERRORS: ENORADDR Invalid real address
830 * EBADALIGN Start address or length are not correctly
831 * aligned
832 * EINVAL Length is zero
833 *
834 * Zero the memory contents in the range real address to real address
835 * plus length minus 1. Also, valid ECC will be generated for that
836 * memory address range. Scrubbing is started at the given real
837 * address, but may not scrub the entire given length. The actual
838 * length scrubbed will be returned in RET1.
839 *
840 * The real address and length must be aligned on an 8K boundary, or
841 * contain the start address and length from a sun4v error report.
842 *
843 * Note: There are two uses for this function. The first use is to block clear
844 * and initialize memory and the second is to scrub an uncorrectable
845 * error reported via a resumable or non-resumable trap. The second
846 * use requires the arguments to be equal to the real address and length
847 * provided in a sun4v memory error report.
848 */
849#define HV_FAST_MEM_SCRUB 0x31
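
#ifndef __ASSEMBLY__
/* Illustrative sketch only: mem_scrub() may scrub less than was asked
 * for, so a caller loops until the whole range is covered.  The wrapper
 * prototype below is hypothetical - it stands in for a trap stub that
 * returns the status and stores the scrubbed length via 'scrubbed'.
 */
extern unsigned long example_hv_mem_scrub(unsigned long raddr,
					  unsigned long len,
					  unsigned long *scrubbed);

static inline unsigned long example_scrub_range(unsigned long raddr,
						unsigned long len)
{
	unsigned long hv_err, done;

	while (len) {
		hv_err = example_hv_mem_scrub(raddr, len, &done);
		if (hv_err)
			return hv_err;
		raddr += done;
		len -= done;
	}
	return 0;
}
#endif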
850
851/* mem_sync()
852 * TRAP: HV_FAST_TRAP
853 * FUNCTION: HV_FAST_MEM_SYNC
854 * ARG0: real address
855 * ARG1: length
856 * RET0: status
857 * RET1: length synced
858 * ERRORS: ENORADDR Invalid real address
859 * EBADALIGN Start address or length are not correctly
860 * aligned
861 * EINVAL Length is zero
862 *
863 * Force the next access within the range real address to real address plus
864 * length minus 1 to be fetched from main system memory. Less than
865 * the given length may be synced; the actual amount synced is
866 * returned in RET1. The real address and length must be aligned on
867 * an 8K boundary.
868 */
869#define HV_FAST_MEM_SYNC 0x32
870
871/* Time of day services.
872 *
873 * The hypervisor maintains the time of day on a per-domain basis.
874 * Changing the time of day in one domain does not affect the time of
875 * day on any other domain.
876 *
877 * Time is described by a single unsigned 64-bit word which is the
878 * number of seconds since the UNIX Epoch (00:00:00 UTC, January 1,
879 * 1970).
880 */
881
882/* tod_get()
883 * TRAP: HV_FAST_TRAP
884 * FUNCTION: HV_FAST_TOD_GET
885 * RET0: status
886 * RET1: TOD
887 * ERRORS: EWOULDBLOCK TOD resource is temporarily unavailable
888 * ENOTSUPPORTED If TOD not supported on this platform
889 *
890 * Return the current time of day. May block if TOD access is
891 * temporarily not possible.
892 */
893#define HV_FAST_TOD_GET 0x50
894
895/* tod_set()
896 * TRAP: HV_FAST_TRAP
897 * FUNCTION: HV_FAST_TOD_SET
898 * ARG0: TOD
899 * RET0: status
900 * ERRORS: EWOULDBLOCK TOD resource is temporarily unavailable
901 * ENOTSUPPORTED If TOD not supported on this platform
902 *
903 * The current time of day is set to the value specified in ARG0. May
904 * block if TOD access is temporarily not possible.
905 */
906#define HV_FAST_TOD_SET 0x51
907
908/* Console services */
909
910/* con_getchar()
911 * TRAP: HV_FAST_TRAP
912 * FUNCTION: HV_FAST_CONS_GETCHAR
913 * RET0: status
914 * RET1: character
915 * ERRORS: EWOULDBLOCK No character available.
916 *
917 * Returns a character from the console device. If no character is
918 * available then an EWOULDBLOCK error is returned. If a character is
919 * available, then the returned status is EOK and the character value
920 * is in RET1.
921 *
922 * A virtual BREAK is represented by the 64-bit value -1.
923 *
924 * A virtual HUP signal is represented by the 64-bit value -2.
925 */
926#define HV_FAST_CONS_GETCHAR 0x60
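
#ifndef __ASSEMBLY__
/* Illustrative sketch only: poll the console for one character and
 * recognize the virtual BREAK (-1) and HUP (-2) values described above.
 * The wrapper prototype is hypothetical - it stands in for a trap stub
 * returning the status and storing the character via 'c'.
 */
extern unsigned long example_hv_con_getchar(long *c);

static inline long example_console_poll(void)
{
	long c;

	if (example_hv_con_getchar(&c) != 0)
		return 0;	/* nothing available, e.g. EWOULDBLOCK */
	if (c == -1 || c == -2)
		return 0;	/* virtual BREAK/HUP, handled elsewhere */
	return c;		/* character value 0 - 255 */
}
#endif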
927
928/* con_putchar()
929 * TRAP: HV_FAST_TRAP
930 * FUNCTION: HV_FAST_CONS_PUTCHAR
931 * ARG0: character
932 * RET0: status
933 * ERRORS: EINVAL Illegal character
934 * EWOULDBLOCK Output buffer currently full, would block
935 *
936 * Send a character to the console device. Only character values
937 * between 0 and 255 may be used. Values outside this range are
938 * invalid except for the 64-bit value -1 which is used to send a
939 * virtual BREAK.
940 */
941#define HV_FAST_CONS_PUTCHAR 0x61
942
943/* Trap trace services.
944 *
945 * The hypervisor provides a trap tracing capability for privileged
946 * code running on each virtual CPU. Privileged code provides a
947 * round-robin trap trace queue within which the hypervisor writes
948 * 64-byte entries detailing hyperprivileged traps taken on behalf of
949 * privileged code. This is provided as a debugging capability for
950 * privileged code.
951 *
952 * The trap trace control structure is 64-bytes long and placed at the
953 * start (offset 0) of the trap trace buffer, and is described as
954 * follows:
955 */
956#ifndef __ASSEMBLY__
957struct hv_trap_trace_control {
958 unsigned long head_offset;
959 unsigned long tail_offset;
960 unsigned long __reserved[0x30 / sizeof(unsigned long)];
961};
962#endif
963#define HV_TRAP_TRACE_CTRL_HEAD_OFFSET 0x00
964#define HV_TRAP_TRACE_CTRL_TAIL_OFFSET 0x08
965
966/* The head offset is the offset of the most recently completed entry
967 * in the trap-trace buffer. The tail offset is the offset of the
968 * next entry to be written. The control structure is owned and
969 * modified by the hypervisor. A guest may not modify the control
970 * structure contents. Attempts to do so will result in undefined
971 * behavior for the guest.
972 *
973 * Each trap trace buffer entry is laid out as follows:
974 */
975#ifndef __ASSEMBLY__
976struct hv_trap_trace_entry {
977 unsigned char type; /* Hypervisor or guest entry? */
978 unsigned char hpstate; /* Hyper-privileged state */
979 unsigned char tl; /* Trap level */
980 unsigned char gl; /* Global register level */
981 unsigned short tt; /* Trap type */
982 unsigned short tag; /* Extended trap identifier */
983 unsigned long tstate; /* Trap state */
984 unsigned long tick; /* Tick */
985 unsigned long tpc; /* Trap PC */
986 unsigned long f1; /* Entry specific */
987 unsigned long f2; /* Entry specific */
988 unsigned long f3; /* Entry specific */
989 unsigned long f4; /* Entry specific */
990};
991#endif
992#define HV_TRAP_TRACE_ENTRY_TYPE 0x00
993#define HV_TRAP_TRACE_ENTRY_HPSTATE 0x01
994#define HV_TRAP_TRACE_ENTRY_TL 0x02
995#define HV_TRAP_TRACE_ENTRY_GL 0x03
996#define HV_TRAP_TRACE_ENTRY_TT 0x04
997#define HV_TRAP_TRACE_ENTRY_TAG 0x06
998#define HV_TRAP_TRACE_ENTRY_TSTATE 0x08
999#define HV_TRAP_TRACE_ENTRY_TICK 0x10
1000#define HV_TRAP_TRACE_ENTRY_TPC 0x18
1001#define HV_TRAP_TRACE_ENTRY_F1 0x20
1002#define HV_TRAP_TRACE_ENTRY_F2 0x28
1003#define HV_TRAP_TRACE_ENTRY_F3 0x30
1004#define HV_TRAP_TRACE_ENTRY_F4 0x38
1005
1006/* The type field is encoded as follows. */
1007#define HV_TRAP_TYPE_UNDEF 0x00 /* Entry content undefined */
1008#define HV_TRAP_TYPE_HV 0x01 /* Hypervisor trap entry */
1009#define HV_TRAP_TYPE_GUEST 0xff /* Added via ttrace_addentry() */
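
#ifndef __ASSEMBLY__
/* Illustrative sketch only: dump the most recent entries of a trap trace
 * buffer, walking backwards from head_offset (the most recently
 * completed entry) and wrapping past the 64-byte control block at the
 * start of the buffer.  'buf_size' is the total buffer size including
 * the control block; visiting slots that were never written is not
 * guarded against here.
 */
static inline void example_ttrace_dump_recent(void *buf, unsigned long buf_size,
					      unsigned long nentries,
					      void (*visit)(const struct hv_trap_trace_entry *))
{
	const struct hv_trap_trace_control *ctl = buf;
	unsigned long off = ctl->head_offset;

	while (nentries--) {
		visit((const struct hv_trap_trace_entry *)((char *)buf + off));
		if (off <= sizeof(struct hv_trap_trace_control))
			off = buf_size;		/* wrap to the last entry */
		off -= sizeof(struct hv_trap_trace_entry);
	}
}
#endif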
1010
1011/* ttrace_buf_conf()
1012 * TRAP: HV_FAST_TRAP
1013 * FUNCTION: HV_FAST_TTRACE_BUF_CONF
1014 * ARG0: real address
1015 * ARG1: number of entries
1016 * RET0: status
1017 * RET1: number of entries
1018 * ERRORS: ENORADDR Invalid real address
1019 * EINVAL Size is too small
1020 * EBADALIGN Real address not aligned on 64-byte boundary
1021 *
1022 * Requests hypervisor trap tracing and declares a virtual CPU's trap
1023 * trace buffer to the hypervisor. The real address supplies the real
1024 * base address of the trap trace queue and must be 64-byte aligned.
1025 * Specifying a value of 0 for the number of entries disables trap
1026 * tracing for the calling virtual CPU. The buffer allocated must be
1027 * sized for a power of two number of 64-byte trap trace entries plus
1028 * an initial 64-byte control structure.
1029 *
1030 * This may be invoked any number of times so that a virtual CPU may
1031 * relocate a trap trace buffer or create "snapshots" of information.
1032 *
1033 * If the real address is illegal or badly aligned, then trap tracing
1034 * is disabled and an error is returned.
1035 *
1036 * Upon failure with EINVAL, this service call returns in RET1 the
1037 * minimum number of buffer entries required. Upon other failures
1038 * RET1 is undefined.
1039 */
1040#define HV_FAST_TTRACE_BUF_CONF 0x90
1041
1042/* ttrace_buf_info()
1043 * TRAP: HV_FAST_TRAP
1044 * FUNCTION: HV_FAST_TTRACE_BUF_INFO
1045 * RET0: status
1046 * RET1: real address
1047 * RET2: size
1048 * ERRORS: None defined.
1049 *
1050 * Returns the size and location of the previously declared trap-trace
1051 * buffer. In the event that no buffer was previously defined, or the
1052 * buffer is disabled, this call will return a size of zero bytes.
1053 */
1054#define HV_FAST_TTRACE_BUF_INFO 0x91
1055
1056/* ttrace_enable()
1057 * TRAP: HV_FAST_TRAP
1058 * FUNCTION: HV_FAST_TTRACE_ENABLE
1059 * ARG0: enable
1060 * RET0: status
1061 * RET1: previous enable state
1062 * ERRORS: EINVAL No trap trace buffer currently defined
1063 *
1064 * Enable or disable trap tracing, and return the previous enabled
1065 * state in RET1. Future systems may define various flags for the
1066 * enable argument (ARG0); for the moment a guest should pass
1067 * "(uint64_t) -1" to enable, and "(uint64_t) 0" to disable all
1068 * tracing - which will ensure future compatibility.
1069 */
1070#define HV_FAST_TTRACE_ENABLE 0x92
1071
1072/* ttrace_freeze()
1073 * TRAP: HV_FAST_TRAP
1074 * FUNCTION: HV_FAST_TTRACE_FREEZE
1075 * ARG0: freeze
1076 * RET0: status
1077 * RET1: previous freeze state
1078 * ERRORS: EINVAL No trap trace buffer currently defined
1079 *
1080 * Freeze or unfreeze trap tracing, returning the previous freeze
1081 * state in RET1. A guest should pass a non-zero value to freeze and
1082 * a zero value to unfreeze all tracing. The returned previous state
1083 * is 0 for not frozen and 1 for frozen.
1084 */
1085#define HV_FAST_TTRACE_FREEZE 0x93
1086
1087/* ttrace_addentry()
1088 * TRAP: HV_TTRACE_ADDENTRY_TRAP
1089 * ARG0: tag (16-bits)
1090 * ARG1: data word 0
1091 * ARG2: data word 1
1092 * ARG3: data word 2
1093 * ARG4: data word 3
1094 * RET0: status
1095 * ERRORS: EINVAL No trap trace buffer currently defined
1096 *
1097 * Add an entry to the trap trace buffer. Upon return only ARG0/RET0
1098 * is modified - none of the other registers holding arguments are
1099 * volatile across this hypervisor service.
1100 */
1101
1102/* Core dump services.
1103 *
1104 * Since the hypervisor virtualizes and thus obscures a lot of the
1105 * physical machine layout and state, traditional OS crash dumps can
1106 * be difficult to diagnose especially when the problem is a
1107 * configuration error of some sort.
1108 *
1109 * The dump services provide an opaque buffer into which the
1110 * hypervisor can place its internal state in order to assist in
1111 * debugging such situations. The contents are opaque and extremely
1112 * platform and hypervisor implementation specific. The guest, during
1113 * a core dump, requests that the hypervisor update any information in
1114 * the dump buffer in preparation for its being dumped as part of the
1115 * domain's memory image.
1116 */
1117
1118/* dump_buf_update()
1119 * TRAP: HV_FAST_TRAP
1120 * FUNCTION: HV_FAST_DUMP_BUF_UPDATE
1121 * ARG0: real address
1122 * ARG1: size
1123 * RET0: status
1124 * RET1: required size of dump buffer
1125 * ERRORS: ENORADDR Invalid real address
1126 * EBADALIGN Real address is not aligned on a 64-byte
1127 * boundary
1128 * EINVAL Size is non-zero but less than minimum size
1129 * required
1130 * ENOTSUPPORTED Operation not supported on current logical
1131 * domain
1132 *
1133 * Declare a domain dump buffer to the hypervisor. The real address
1134 * provided for the domain dump buffer must be 64-byte aligned. The
1135 * size specifies the size of the dump buffer and may be larger than
1136 * the minimum size specified in the machine description. The
1137 * hypervisor will fill the dump buffer with opaque data.
1138 *
1139 * Note: A guest may elect to include dump buffer contents as part of a crash
1140 * dump to assist with debugging. This function may be called any number
1141 * of times so that a guest may relocate a dump buffer, or create
1142 * "snapshots" of any dump-buffer information. Each call to
1143 * dump_buf_update() atomically declares the new dump buffer to the
1144 * hypervisor.
1145 *
1146 * A specified size of 0 unconfigures the dump buffer. If the real
1147 * address is illegal or badly aligned, then any currently active dump
1148 * buffer is disabled and an error is returned.
1149 *
1150 * In the event that the call fails with EINVAL, RET1 contains the
1151 * minimum size required by the hypervisor for a valid dump buffer.
1152 */
1153#define HV_FAST_DUMP_BUF_UPDATE 0x94
1154
1155/* dump_buf_info()
1156 * TRAP: HV_FAST_TRAP
1157 * FUNCTION: HV_FAST_DUMP_BUF_INFO
1158 * RET0: status
1159 * RET1: real address of current dump buffer
1160 * RET2: size of current dump buffer
1161 * ERRORS: No errors defined.
1162 *
1163 * Return the currently configured dump buffer description. A
1164 * returned size of 0 bytes indicates an undefined dump buffer. In
1165 * this case the return address in RET1 is undefined.
1166 */
1167#define HV_FAST_DUMP_BUF_INFO 0x95
1168
1169/* Device interrupt services.
1170 *
1171 * Device interrupts are allocated to system bus bridges by the hypervisor,
1172 * and described to OBP in the machine description. OBP then describes
1173 * these interrupts to the OS via properties in the device tree.
1174 *
1175 * Terminology:
1176 *
1177 * cpuid Unique opaque value which represents a target cpu.
1178 *
1179 * devhandle Device handle. It uniquely identifies a device, and
1180 * consists of the lower 28-bits of the hi-cell of the
1181 * first entry of the device's "reg" property in the
1182 * OBP device tree.
1183 *
1184 * devino Device interrupt number. Specifies the relative
1185 * interrupt number within the device. The unique
1186 * combination of devhandle and devino are used to
1187 * identify a specific device interrupt.
1188 *
1189 * Note: The devino value is the same as the values in the
1190 * "interrupts" property or "interrupt-map" property
1191 * in the OBP device tree for that device.
1192 *
1193 * sysino System interrupt number. A 64-bit unsigned integer
1194 * representing a unique interrupt within a virtual
1195 * machine.
1196 *
1197 * intr_state A flag representing the interrupt state for a given
1198 * sysino. The state values are defined below.
1199 *
1200 * intr_enabled A flag representing the 'enabled' state for a given
1201 * sysino. The enable values are defined below.
1202 */
1203
1204#define HV_INTR_STATE_IDLE 0 /* Nothing pending */
1205#define HV_INTR_STATE_RECEIVED 1 /* Interrupt received by hardware */
1206#define HV_INTR_STATE_DELIVERED 2 /* Interrupt delivered to queue */
1207
1208#define HV_INTR_DISABLED 0 /* sysino not enabled */
1209#define HV_INTR_ENABLED 1 /* sysino enabled */
1210
1211/* intr_devino_to_sysino()
1212 * TRAP: HV_FAST_TRAP
1213 * FUNCTION: HV_FAST_INTR_DEVINO2SYSINO
1214 * ARG0: devhandle
1215 * ARG1: devino
1216 * RET0: status
1217 * RET1: sysino
1218 * ERRORS: EINVAL Invalid devhandle/devino
1219 *
1220 * Converts a device specific interrupt number of the given
1221 * devhandle/devino into a system specific ino (sysino).
1222 */
1223#define HV_FAST_INTR_DEVINO2SYSINO 0xa0
1224
1225#ifndef __ASSEMBLY__
1226extern unsigned long sun4v_devino_to_sysino(unsigned long devhandle,
1227 unsigned long devino);
1228#endif
1229
1230/* intr_getenabled()
1231 * TRAP: HV_FAST_TRAP
1232 * FUNCTION: HV_FAST_INTR_GETENABLED
1233 * ARG0: sysino
1234 * RET0: status
1235 * RET1: intr_enabled (HV_INTR_{DISABLED,ENABLED})
1236 * ERRORS: EINVAL Invalid sysino
1237 *
1238 * Returns interrupt enabled state in RET1 for the interrupt defined
1239 * by the given sysino.
1240 */
1241#define HV_FAST_INTR_GETENABLED 0xa1
1242
1243#ifndef __ASSEMBLY__
1244extern unsigned long sun4v_intr_getenabled(unsigned long sysino);
1245#endif
1246
1247/* intr_setenabled()
1248 * TRAP: HV_FAST_TRAP
1249 * FUNCTION: HV_FAST_INTR_SETENABLED
1250 * ARG0: sysino
1251 * ARG1: intr_enabled (HV_INTR_{DISABLED,ENABLED})
1252 * RET0: status
1253 * ERRORS: EINVAL Invalid sysino or intr_enabled value
1254 *
1255 * Set the 'enabled' state of the interrupt sysino.
1256 */
1257#define HV_FAST_INTR_SETENABLED 0xa2
1258
1259#ifndef __ASSEMBLY__
1260extern unsigned long sun4v_intr_setenabled(unsigned long sysino, unsigned long intr_enabled);
1261#endif
1262
1263/* intr_getstate()
1264 * TRAP: HV_FAST_TRAP
1265 * FUNCTION: HV_FAST_INTR_GETSTATE
1266 * ARG0: sysino
1267 * RET0: status
1268 * RET1: intr_state (HV_INTR_STATE_*)
1269 * ERRORS: EINVAL Invalid sysino
1270 *
1271 * Returns current state of the interrupt defined by the given sysino.
1272 */
1273#define HV_FAST_INTR_GETSTATE 0xa3
1274
1275#ifndef __ASSEMBLY__
1276extern unsigned long sun4v_intr_getstate(unsigned long sysino);
1277#endif
1278
1279/* intr_setstate()
1280 * TRAP: HV_FAST_TRAP
1281 * FUNCTION: HV_FAST_INTR_SETSTATE
1282 * ARG0: sysino
1283 * ARG1: intr_state (HV_INTR_STATE_*)
1284 * RET0: status
1285 * ERRORS: EINVAL Invalid sysino or intr_state value
1286 *
1287 * Sets the current state of the interrupt described by the given sysino
1288 * value.
1289 *
1290 * Note: Setting the state to HV_INTR_STATE_IDLE clears any pending
1291 * interrupt for sysino.
1292 */
1293#define HV_FAST_INTR_SETSTATE 0xa4
1294
1295#ifndef __ASSEMBLY__
1296extern unsigned long sun4v_intr_setstate(unsigned long sysino, unsigned long intr_state);
1297#endif
1298
1299/* intr_gettarget()
1300 * TRAP: HV_FAST_TRAP
1301 * FUNCTION: HV_FAST_INTR_GETTARGET
1302 * ARG0: sysino
1303 * RET0: status
1304 * RET1: cpuid
1305 * ERRORS: EINVAL Invalid sysino
1306 *
1307 * Returns CPU that is the current target of the interrupt defined by
1308 * the given sysino. The CPU value returned is undefined if the target
1309 * has not been set via intr_settarget().
1310 */
1311#define HV_FAST_INTR_GETTARGET 0xa5
1312
1313#ifndef __ASSEMBLY__
1314extern unsigned long sun4v_intr_gettarget(unsigned long sysino);
1315#endif
1316
1317/* intr_settarget()
1318 * TRAP: HV_FAST_TRAP
1319 * FUNCTION: HV_FAST_INTR_SETTARGET
1320 * ARG0: sysino
1321 * ARG1: cpuid
1322 * RET0: status
1323 * ERRORS: EINVAL Invalid sysino
1324 * ENOCPU Invalid cpuid
1325 *
1326 * Set the target CPU for the interrupt defined by the given sysino.
1327 */
1328#define HV_FAST_INTR_SETTARGET 0xa6
1329
1330#ifndef __ASSEMBLY__
1331extern unsigned long sun4v_intr_settarget(unsigned long sysino, unsigned long cpuid);
1332#endif
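
#ifndef __ASSEMBLY__
/* Illustrative sketch only: a typical setup sequence for a device
 * interrupt using the services above - translate devhandle/devino into a
 * sysino, aim it at a cpu, clear any stale pending state, then enable
 * it.  This assumes sun4v_devino_to_sysino() returns the sysino directly
 * and that a non-zero status from the other wrappers means failure.
 */
static inline unsigned long example_setup_device_irq(unsigned long devhandle,
						     unsigned long devino,
						     unsigned long cpuid,
						     unsigned long *sysino_ret)
{
	unsigned long sysino, hv_err;

	sysino = sun4v_devino_to_sysino(devhandle, devino);

	hv_err = sun4v_intr_settarget(sysino, cpuid);
	if (hv_err)
		return hv_err;
	hv_err = sun4v_intr_setstate(sysino, HV_INTR_STATE_IDLE);
	if (hv_err)
		return hv_err;
	hv_err = sun4v_intr_setenabled(sysino, HV_INTR_ENABLED);
	if (hv_err)
		return hv_err;

	*sysino_ret = sysino;
	return 0;
}
#endif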
1333
1334/* PCI IO services.
1335 *
1336 * See the terminology descriptions in the device interrupt services
1337 * section above as those apply here too. Here are terminology
1338 * definitions specific to these PCI IO services:
1339 *
1340 * tsbnum TSB number. Identifies which io-tsb is used.
1341 * For this version of the specification, tsbnum
1342 * must be zero.
1343 *
1344 * tsbindex TSB index. Identifies which entry in the TSB
1345 * is used. The first entry is zero.
1346 *
1347 * tsbid A 64-bit aligned data structure which contains
1348 * a tsbnum and a tsbindex. Bits 63:32 contain the
1349 * tsbnum and bits 31:00 contain the tsbindex.
1350 *
1351 * Use the HV_PCI_TSBID() macro to construct such
1352 * values.
1353 *
1354 * io_attributes IO attributes for IOMMU mappings. One or more
1355 * of the attribute bits are stored in a 64-bit
1356 * value. The values are defined below.
1357 *
1358 * r_addr 64-bit real address
1359 *
1360 * pci_device PCI device address. A PCI device address identifies
1361 * a specific device on a specific PCI bus segment.
1362 * A PCI device address is a 32-bit unsigned integer
1363 * with the following format:
1364 *
1365 * 00000000.bbbbbbbb.dddddfff.00000000
1366 *
1367 * Use the HV_PCI_DEVICE_BUILD() macro to construct
1368 * such values.
1369 *
1370 * pci_config_offset
1371 * PCI configuration space offset. For conventional
1372 * PCI a value between 0 and 255. For extended
1373 * configuration space, a value between 0 and 4095.
1374 *
1375 * Note: For PCI configuration space accesses, the offset
1376 * must be aligned to the access size.
1377 *
1378 * error_flag A return value which specifies if the action succeeded
1379 * or failed. 0 means no error, non-0 means some error
1380 * occurred while performing the service.
1381 *
1382 * io_sync_direction
1383 * Direction definition for pci_dma_sync(), defined
1384 * below in HV_PCI_SYNC_*.
1385 *
1386 * io_page_list A list of io_page_addresses, an io_page_address is
1387 * a real address.
1388 *
1389 * io_page_list_p A pointer to an io_page_list.
1390 *
1391 * "size based byte swap" - Some functions do size based byte swapping
1392 * which allows software to access pointers and
1393 * counters in native form when the processor
1394 * operates in a different endianness than the
1395 * IO bus. Size-based byte swapping converts a
1396 * multi-byte field between big-endian and
1397 * little-endian format.
1398 */
1399
1400#define HV_PCI_MAP_ATTR_READ 0x01
1401#define HV_PCI_MAP_ATTR_WRITE 0x02
1402
1403#define HV_PCI_DEVICE_BUILD(b,d,f) \
1404 ((((b) & 0xff) << 16) | \
1405 (((d) & 0x1f) << 11) | \
1406 (((f) & 0x07) << 8))
1407
1408#define HV_PCI_TSBID(__tsb_num, __tsb_index) \
1409 ((((u64)(__tsb_num)) << 32UL) | ((u64)(__tsb_index)))
1410
1411#define HV_PCI_SYNC_FOR_DEVICE 0x01
1412#define HV_PCI_SYNC_FOR_CPU 0x02
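
#ifndef __ASSEMBLY__
/* Illustrative sketch only: build the packed argument values used by the
 * PCI services below with the macros above.  Bus 0x02, device 0x03,
 * function 0x01, TSB number 0 and TSB index 100 are arbitrary example
 * numbers.
 */
static inline void example_build_pci_args(unsigned long *pci_device,
					  unsigned long *tsbid)
{
	*pci_device = HV_PCI_DEVICE_BUILD(0x02, 0x03, 0x01);
	*tsbid = HV_PCI_TSBID(0, 100);
}
#endif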
1413
1414/* pci_iommu_map()
1415 * TRAP: HV_FAST_TRAP
1416 * FUNCTION: HV_FAST_PCI_IOMMU_MAP
1417 * ARG0: devhandle
1418 * ARG1: tsbid
1419 * ARG2: #ttes
1420 * ARG3: io_attributes
1421 * ARG4: io_page_list_p
1422 * RET0: status
1423 * RET1: #ttes mapped
1424 * ERRORS: EINVAL Invalid devhandle/tsbnum/tsbindex/io_attributes
1425 * EBADALIGN Improperly aligned real address
1426 * ENORADDR Invalid real address
1427 *
1428 * Create IOMMU mappings in the sun4v device defined by the given
1429 * devhandle. The mappings are created in the TSB defined by the
1430 * tsbnum component of the given tsbid. The first mapping is created
1431 * in the TSB index defined by the tsbindex component of the given tsbid.
1432 * The call creates up to #ttes mappings, the first one at tsbnum, tsbindex,
1433 * the second at tsbnum, tsbindex + 1, etc.
1434 *
1435 * All mappings are created with the attributes defined by the io_attributes
1436 * argument. The page mapping addresses are described in the io_page_list
1437 * defined by the given io_page_list_p, which is a pointer to the io_page_list.
1438 * The first entry in the io_page_list is the address for the first iotte, the
1439 * 2nd for the 2nd iotte, and so on.
1440 *
1441 * Each io_page_address in the io_page_list must be appropriately aligned.
1442 * #ttes must be greater than zero. For this version of the spec, the tsbnum
1443 * component of the given tsbid must be zero.
1444 *
1445 * Returns the actual number of mappings created, which may be less than
1446 * or equal to the argument #ttes. If the function returns a value which
1447 * is less than #ttes, the caller may continue to call the function with
1448 * an updated tsbid, #ttes, io_page_list_p arguments until all pages are
1449 * mapped.
1450 *
1451 * Note: This function does not imply an iotte cache flush. The guest must
1452 * demap an entry before re-mapping it.
1453 */
1454#define HV_FAST_PCI_IOMMU_MAP 0xb0
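
#ifndef __ASSEMBLY__
/* Illustrative sketch only: pci_iommu_map() may map fewer entries than
 * requested, so a caller advances the tsbid, remaining #ttes and
 * io_page_list pointer and retries, as described above.  The wrapper
 * prototype is hypothetical - it stands in for a trap stub that returns
 * the status and stores the number of ttes mapped via 'mapped'.  Each
 * io_page_list entry is assumed to be an 8-byte real address.
 */
extern unsigned long example_hv_pci_iommu_map(unsigned long devhandle,
					      unsigned long tsbid,
					      unsigned long num_ttes,
					      unsigned long io_attrs,
					      unsigned long io_page_list_pa,
					      unsigned long *mapped);

static inline unsigned long example_iommu_map_all(unsigned long devhandle,
						  unsigned long tsbindex,
						  unsigned long num_ttes,
						  unsigned long io_attrs,
						  unsigned long io_page_list_pa)
{
	unsigned long hv_err, done;

	while (num_ttes) {
		hv_err = example_hv_pci_iommu_map(devhandle,
						  HV_PCI_TSBID(0, tsbindex),
						  num_ttes, io_attrs,
						  io_page_list_pa, &done);
		if (hv_err)
			return hv_err;
		tsbindex += done;
		num_ttes -= done;
		io_page_list_pa += done * 8;	/* 8-byte io_page_addresses */
	}
	return 0;
}
#endif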
1455
1456/* pci_iommu_demap()
1457 * TRAP: HV_FAST_TRAP
1458 * FUNCTION: HV_FAST_PCI_IOMMU_DEMAP
1459 * ARG0: devhandle
1460 * ARG1: tsbid
1461 * ARG2: #ttes
1462 * RET0: status
1463 * RET1: #ttes demapped
1464 * ERRORS: EINVAL Invalid devhandle/tsbnum/tsbindex
1465 *
1466 * Demap and flush IOMMU mappings in the device defined by the given
1467 * devhandle. Demaps up to #ttes entries in the TSB defined by the tsbnum
1468 * component of the given tsbid, starting at the TSB index defined by the
1469 * tsbindex component of the given tsbid.
1470 *
1471 * For this version of the spec, the tsbnum of the given tsbid must be zero.
1472 * #ttes must be greater than zero.
1473 *
1474 * Returns the actual number of ttes demapped, which may be less than or equal
1475 * to the argument #ttes. If #ttes demapped is less than #ttes, the caller
1476 * may continue to call this function with updated tsbid and #ttes arguments
1477 * until all pages are demapped.
1478 *
1479 * Note: Entries do not have to be mapped to be demapped. A demap of an
1480 * unmapped page will flush the entry from the tte cache.
1481 */
1482#define HV_FAST_PCI_IOMMU_DEMAP 0xb1
1483
1484/* pci_iommu_getmap()
1485 * TRAP: HV_FAST_TRAP
1486 * FUNCTION: HV_FAST_PCI_IOMMU_GETMAP
1487 * ARG0: devhandle
1488 * ARG1: tsbid
1489 * RET0: status
1490 * RET1: io_attributes
1491 * RET2: real address
1492 * ERRORS: EINVAL Invalid devhandle/tsbnum/tsbindex
1493 * ENOMAP Mapping is not valid, no translation exists
1494 *
1495 * Read and return the mapping in the device described by the given devhandle
1496 * and tsbid. If successful, the io_attributes shall be returned in RET1
1497 * and the page address of the mapping shall be returned in RET2.
1498 *
1499 * For this version of the spec, the tsbnum component of the given tsbid
1500 * must be zero.
1501 */
1502#define HV_FAST_PCI_IOMMU_GETMAP 0xb2
1503
1504/* pci_iommu_getbypass()
1505 * TRAP: HV_FAST_TRAP
1506 * FUNCTION: HV_FAST_PCI_IOMMU_GETBYPASS
1507 * ARG0: devhandle
1508 * ARG1: real address
1509 * ARG2: io_attributes
1510 * RET0: status
1511 * RET1: io_addr
1512 * ERRORS: EINVAL Invalid devhandle/io_attributes
1513 * ENORADDR Invalid real address
1514 * ENOTSUPPORTED Function not supported in this implementation.
1515 *
1516 * Create a "special" mapping in the device described by the given devhandle,
1517 * for the given real address and attributes. Return the IO address in RET1
1518 * if successful.
1519 */
1520#define HV_FAST_PCI_IOMMU_GETBYPASS 0xb3
1521
1522/* pci_config_get()
1523 * TRAP: HV_FAST_TRAP
1524 * FUNCTION: HV_FAST_PCI_CONFIG_GET
1525 * ARG0: devhandle
1526 * ARG1: pci_device
1527 * ARG2: pci_config_offset
1528 * ARG3: size
1529 * RET0: status
1530 * RET1: error_flag
1531 * RET2: data
1532 * ERRORS: EINVAL Invalid devhandle/pci_device/offset/size
1533 * EBADALIGN pci_config_offset not size aligned
1534 * ENOACCESS Access to this offset is not permitted
1535 *
1536 * Read PCI configuration space for the adapter described by the given
1537 * devhandle. Read size (1, 2, or 4) bytes of data from the given
1538 * pci_device, at pci_config_offset from the beginning of the device's
1539 * configuration space. If there was no error, RET1 is set to zero and
1540 * RET2 is set to the data read. Insignificant bits in RET2 are not
1541 * guaranteed to have any specific value and therefore must be ignored.
1542 *
1543 * The data returned in RET2 is size based byte swapped.
1544 *
1545 * If an error occurs during the read, RET1 is set to a non-zero value. The
1546 * given pci_config_offset must be 'size' aligned.
1547 */
1548#define HV_FAST_PCI_CONFIG_GET 0xb4
1549
1550/* pci_config_put()
1551 * TRAP: HV_FAST_TRAP
1552 * FUNCTION: HV_FAST_PCI_CONFIG_PUT
1553 * ARG0: devhandle
1554 * ARG1: pci_device
1555 * ARG2: pci_config_offset
1556 * ARG3: size
1557 * ARG4: data
1558 * RET0: status
1559 * RET1: error_flag
1560 * ERRORS: EINVAL Invalid devhandle/pci_device/offset/size
1561 * EBADALIGN pci_config_offset not size aligned
1562 * ENOACCESS Access to this offset is not permitted
1563 *
1564 * Write PCI configuration space for the adapter described by the given
1565 * devhandle. Write size (1, 2, or 4) bytes of data in a single operation,
1566 * at pci_config_offset from the beginning of the device's configuration
1567 * space. The data argument contains the data to be written to configuration
1568 * space. Prior to writing, the data is size based byte swapped.
1569 *
1570 * If an error occurs during the write access, no error report is
1571 * generated, but RET1 is set to a non-zero value. Otherwise RET1 is zero.
1572 * The given pci_config_offset must be 'size' aligned.
1573 *
1574 * This function is permitted to read from offset zero in the configuration
1575 * space described by the given pci_device if necessary to ensure that the
1576 * write access to config space completes.
1577 */
1578#define HV_FAST_PCI_CONFIG_PUT 0xb5
1579
1580/* pci_peek()
1581 * TRAP: HV_FAST_TRAP
1582 * FUNCTION: HV_FAST_PCI_PEEK
1583 * ARG0: devhandle
1584 * ARG1: real address
1585 * ARG2: size
1586 * RET0: status
1587 * RET1: error_flag
1588 * RET2: data
1589 * ERRORS: EINVAL Invalid devhandle or size
1590 * EBADALIGN Improperly aligned real address
1591 * ENORADDR Bad real address
1592 * ENOACCESS Guest access prohibited
1593 *
1594 * Attempt to read the IO address given by the given devhandle, real address,
1595 * and size. Size must be 1, 2, 4, or 8. The read is performed as a single
1596 * access operation using the given size. If an error occurs when reading
1597 * from the given location, do not generate an error report, but return a
1598 * non-zero value in RET1. If the read was successful, return zero in RET1
1599 * and return the actual data read in RET2. The data returned is size based
1600 * byte swapped.
1601 *
1602 * Non-significant bits in RET2 are not guaranteed to have any specific value
1603 * and therefore must be ignored. If RET1 is returned as non-zero, the data
1604 * value is not guaranteed to have any specific value and should be ignored.
1605 *
1606 * The caller must have permission to read from the given devhandle, real
1607 * address, which must be an IO address. The argument real address must be a
1608 * size aligned address.
1609 *
1610 * The hypervisor implementation of this function must block access to any
1611 * IO address that the guest does not have explicit permission to access.
1612 */
1613#define HV_FAST_PCI_PEEK 0xb6
1614
1615/* pci_poke()
1616 * TRAP: HV_FAST_TRAP
1617 * FUNCTION: HV_FAST_PCI_POKE
1618 * ARG0: devhandle
1619 * ARG1: real address
1620 * ARG2: size
1621 * ARG3: data
1622 * ARG4: pci_device
1623 * RET0: status
1624 * RET1: error_flag
1625 * ERRORS: EINVAL Invalid devhandle, size, or pci_device
1626 * EBADALIGN Improperly aligned real address
1627 * ENORADDR Bad real address
1628 * ENOACCESS Guest access prohibited
1629 * ENOTSUPPORTED Function is not supported by implementation
1630 *
1631 * Attempt to write data to the IO address given by the given devhandle,
1632 * real address, and size. Size must be 1, 2, 4, or 8. The write is
1633 * performed as a single access operation using the given size. Prior to
1634 * writing, the data is size based byte swapped.
1635 *
1636 * If an error occurs when writing to the given location, do not generate an
1637 * error report, but return a non-zero value in RET1. If the write was
1638 * successful, return zero in RET1.
1639 *
1640 * pci_device describes the configuration address of the device being
1641 * written to. The implementation may safely read from offset 0 of
1642 * the configuration space of the device described by devhandle and
1643 * pci_device in order to guarantee that the write portion of the operation
1644 * completes.
1645 *
1646 * Any error that occurs due to the read shall be reported using the normal
1647 * error reporting mechanisms; the read error is not suppressed.
1648 *
1649 * The caller must have permission to write to the given devhandle, real
1650 * address, which must be an IO address. The argument real address must be a
1651 * size aligned address. The caller must have permission to read from
1652 * the given devhandle, pci_device configuration space offset 0.
1653 *
1654 * The hypervisor implementation of this function must block access to any
1655 * IO address that the guest does not have explicit permission to access.
1656 */
1657#define HV_FAST_PCI_POKE 0xb7
1658
1659/* pci_dma_sync()
1660 * TRAP: HV_FAST_TRAP
1661 * FUNCTION: HV_FAST_PCI_DMA_SYNC
1662 * ARG0: devhandle
1663 * ARG1: real address
1664 * ARG2: size
1665 * ARG3: io_sync_direction
1666 * RET0: status
1667 * RET1: #synced
1668 * ERRORS: EINVAL Invalid devhandle or io_sync_direction
1669 * ENORADDR Bad real address
1670 *
1671 * Synchronize a memory region described by the given real address and size,
1672 * for the device defined by the given devhandle using the direction(s)
1673 * defined by the given io_sync_direction. The argument size is the size of
1674 * the memory region in bytes.
1675 *
1676 * Return the actual number of bytes synchronized in the return value #synced,
1677 * which may be less than or equal to the argument size. If the return
1678 * value #synced is less than size, the caller must continue to call this
1679 * function with updated real address and size arguments until the entire
1680 * memory region is synchronized.
1681 */
1682#define HV_FAST_PCI_DMA_SYNC 0xb8
1683
1684/* PCI MSI services. */
1685
1686#define HV_MSITYPE_MSI32 0x00
1687#define HV_MSITYPE_MSI64 0x01
1688
1689#define HV_MSIQSTATE_IDLE 0x00
1690#define HV_MSIQSTATE_ERROR 0x01
1691
1692#define HV_MSIQ_INVALID 0x00
1693#define HV_MSIQ_VALID 0x01
1694
1695#define HV_MSISTATE_IDLE 0x00
1696#define HV_MSISTATE_DELIVERED 0x01
1697
1698#define HV_MSIVALID_INVALID 0x00
1699#define HV_MSIVALID_VALID 0x01
1700
1701#define HV_PCIE_MSGTYPE_PME_MSG 0x18
1702#define HV_PCIE_MSGTYPE_PME_ACK_MSG 0x1b
1703#define HV_PCIE_MSGTYPE_CORR_MSG 0x30
1704#define HV_PCIE_MSGTYPE_NONFATAL_MSG 0x31
1705#define HV_PCIE_MSGTYPE_FATAL_MSG 0x33
1706
1707#define HV_MSG_INVALID 0x00
1708#define HV_MSG_VALID 0x01
1709
1710/* pci_msiq_conf()
1711 * TRAP: HV_FAST_TRAP
1712 * FUNCTION: HV_FAST_PCI_MSIQ_CONF
1713 * ARG0: devhandle
1714 * ARG1: msiqid
1715 * ARG2: real address
1716 * ARG3: number of entries
1717 * RET0: status
1718 * ERRORS: EINVAL Invalid devhandle, msiqid or nentries
1719 * EBADALIGN Improperly aligned real address
1720 * ENORADDR Bad real address
1721 *
1722 * Configure the MSI queue given by the devhandle and msiqid arguments,
1723 * to be placed at the given real address and to have the given
1724 * number of entries. The real address must be aligned exactly to match
1725 * the queue size. Each queue entry is 64 bytes long, so for example a 32 entry
1726 * queue must be aligned on a 2048 byte real address boundary. The MSI-EQ
1727 * Head and Tail are initialized so that the MSI-EQ is 'empty'.
1728 *
1729 * Implementation Note: Certain implementations have fixed sized queues. In
1730 * that case, number of entries must contain the correct
1731 * value.
1732 */
1733#define HV_FAST_PCI_MSIQ_CONF 0xc0
1734
1735/* pci_msiq_info()
1736 * TRAP: HV_FAST_TRAP
1737 * FUNCTION: HV_FAST_PCI_MSIQ_INFO
1738 * ARG0: devhandle
1739 * ARG1: msiqid
1740 * RET0: status
1741 * RET1: real address
1742 * RET2: number of entries
1743 * ERRORS: EINVAL Invalid devhandle or msiqid
1744 *
1745 * Return the configuration information for the MSI queue described
1746 * by the given devhandle and msiqid. The base address of the queue
1747 * is returned in RET1 and the number of entries is returned in RET2.
1748 * If the queue is unconfigured, the real address is undefined and the
1749 * number of entries will be returned as zero.
1750 */
1751#define HV_FAST_PCI_MSIQ_INFO 0xc1
1752
1753/* pci_msiq_getvalid()
1754 * TRAP: HV_FAST_TRAP
1755 * FUNCTION: HV_FAST_PCI_MSIQ_GETVALID
1756 * ARG0: devhandle
1757 * ARG1: msiqid
1758 * RET0: status
1759 * RET1: msiqvalid (HV_MSIQ_VALID or HV_MSIQ_INVALID)
1760 * ERRORS: EINVAL Invalid devhandle or msiqid
1761 *
1762 * Get the valid state of the MSI-EQ described by the given devhandle and
1763 * msiqid.
1764 */
1765#define HV_FAST_PCI_MSIQ_GETVALID 0xc2
1766
1767/* pci_msiq_setvalid()
1768 * TRAP: HV_FAST_TRAP
1769 * FUNCTION: HV_FAST_PCI_MSIQ_SETVALID
1770 * ARG0: devhandle
1771 * ARG1: msiqid
1772 * ARG2: msiqvalid (HV_MSIQ_VALID or HV_MSIQ_INVALID)
1773 * RET0: status
1774 * ERRORS: EINVAL Invalid devhandle or msiqid or msiqvalid
1775 * value or MSI EQ is uninitialized
1776 *
1777 * Set the valid state of the MSI-EQ described by the given devhandle and
1778 * msiqid to the given msiqvalid.
1779 */
1780#define HV_FAST_PCI_MSIQ_SETVALID 0xc3
1781
1782/* pci_msiq_getstate()
1783 * TRAP: HV_FAST_TRAP
1784 * FUNCTION: HV_FAST_PCI_MSIQ_GETSTATE
1785 * ARG0: devhandle
1786 * ARG1: msiqid
1787 * RET0: status
1788 * RET1: msiqstate (HV_MSIQSTATE_IDLE or HV_MSIQSTATE_ERROR)
1789 * ERRORS: EINVAL Invalid devhandle or msiqid
1790 *
1791 * Get the state of the MSI-EQ described by the given devhandle and
1792 * msiqid.
1793 */
1794#define HV_FAST_PCI_MSIQ_GETSTATE 0xc4
1795
1796/* pci_msiq_setstate()
1797 * TRAP: HV_FAST_TRAP
1798 * FUNCTION: HV_FAST_PCI_MSIQ_SETSTATE
1799 * ARG0: devhandle
1800 * ARG1: msiqid
1801 * ARG2: msiqstate (HV_MSIQSTATE_IDLE or HV_MSIQSTATE_ERROR)
1802 * RET0: status
1803 * ERRORS: EINVAL Invalid devhandle or msiqid or msiqstate
1804 * value or MSI EQ is uninitialized
1805 *
1806 * Set the state of the MSI-EQ described by the given devhandle and
1807 * msiqid to the given msiqstate.
1808 */
1809#define HV_FAST_PCI_MSIQ_SETSTATE 0xc5
1810
1811/* pci_msiq_gethead()
1812 * TRAP: HV_FAST_TRAP
1813 * FUNCTION: HV_FAST_PCI_MSIQ_GETHEAD
1814 * ARG0: devhandle
1815 * ARG1: msiqid
1816 * RET0: status
1817 * RET1: msiqhead
1818 * ERRORS: EINVAL Invalid devhandle or msiqid
1819 *
1820 * Get the current MSI EQ queue head for the MSI-EQ described by the
1821 * given devhandle and msiqid.
1822 */
1823#define HV_FAST_PCI_MSIQ_GETHEAD 0xc6
1824
1825/* pci_msiq_sethead()
1826 * TRAP: HV_FAST_TRAP
1827 * FUNCTION: HV_FAST_PCI_MSIQ_SETHEAD
1828 * ARG0: devhandle
1829 * ARG1: msiqid
1830 * ARG2: msiqhead
1831 * RET0: status
1832 * ERRORS: EINVAL Invalid devhandle or msiqid or msiqhead,
1833 * or MSI EQ is uninitialized
1834 *
1835 * Set the current MSI EQ queue head for the MSI-EQ described by the
1836 * given devhandle and msiqid.
1837 */
1838#define HV_FAST_PCI_MSIQ_SETHEAD 0xc7
1839
1840/* pci_msiq_gettail()
1841 * TRAP: HV_FAST_TRAP
1842 * FUNCTION: HV_FAST_PCI_MSIQ_GETTAIL
1843 * ARG0: devhandle
1844 * ARG1: msiqid
1845 * RET0: status
1846 * RET1: msiqtail
1847 * ERRORS: EINVAL Invalid devhandle or msiqid
1848 *
1849 * Get the current MSI EQ queue tail for the MSI-EQ described by the
1850 * given devhandle and msiqid.
1851 */
1852#define HV_FAST_PCI_MSIQ_GETTAIL 0xc8
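/* Illustrative only: how an interrupt handler might use the gethead,
 * gettail and sethead calls above to drain an event queue.  The
 * example_* wrappers and the 64-byte entry stride are assumptions made
 * for this sketch; the real consumer lives in the sun4v PCI controller
 * code.
 */
#ifndef __ASSEMBLY__
extern unsigned long example_msiq_gethead(unsigned long devhandle,
					  unsigned long msiqid,
					  unsigned long *head);
extern unsigned long example_msiq_gettail(unsigned long devhandle,
					  unsigned long msiqid,
					  unsigned long *tail);
extern unsigned long example_msiq_sethead(unsigned long devhandle,
					  unsigned long msiqid,
					  unsigned long head);

static inline void example_msiq_drain(unsigned long devhandle,
				      unsigned long msiqid,
				      void *queue_va, unsigned long nentries)
{
	unsigned long head, tail, qsize = nentries * 64;

	if (example_msiq_gethead(devhandle, msiqid, &head) != 0 ||
	    example_msiq_gettail(devhandle, msiqid, &tail) != 0)
		return;

	while (head != tail) {
		/* Process the entry at (queue_va + head) here. */
		head += 64;
		if (head == qsize)
			head = 0;	/* the queue wraps */
	}

	/* Report how far we consumed back to the hypervisor. */
	example_msiq_sethead(devhandle, msiqid, head);
}
#endif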
1853
1854/* pci_msi_getvalid()
1855 * TRAP: HV_FAST_TRAP
1856 * FUNCTION: HV_FAST_PCI_MSI_GETVALID
1857 * ARG0: devhandle
1858 * ARG1: msinum
1859 * RET0: status
1860 * RET1: msivalidstate
1861 * ERRORS: EINVAL Invalid devhandle or msinum
1862 *
1863 * Get the current valid/enabled state for the MSI defined by the
1864 * given devhandle and msinum.
1865 */
1866#define HV_FAST_PCI_MSI_GETVALID 0xc9
1867
1868/* pci_msi_setvalid()
1869 * TRAP: HV_FAST_TRAP
1870 * FUNCTION: HV_FAST_PCI_MSI_SETVALID
1871 * ARG0: devhandle
1872 * ARG1: msinum
1873 * ARG2: msivalidstate
1874 * RET0: status
1875 * ERRORS: EINVAL Invalid devhandle or msinum or msivalidstate
1876 *
1877 * Set the current valid/enabled state for the MSI defined by the
1878 * given devhandle and msinum.
1879 */
1880#define HV_FAST_PCI_MSI_SETVALID 0xca
1881
1882/* pci_msi_getmsiq()
1883 * TRAP: HV_FAST_TRAP
1884 * FUNCTION: HV_FAST_PCI_MSI_GETMSIQ
1885 * ARG0: devhandle
1886 * ARG1: msinum
1887 * RET0: status
1888 * RET1: msiqid
1889 * ERRORS: EINVAL Invalid devhandle or msinum or MSI is unbound
1890 *
1891 * Get the MSI EQ that the MSI defined by the given devhandle and
1892 * msinum is bound to.
1893 */
1894#define HV_FAST_PCI_MSI_GETMSIQ 0xcb
1895
1896/* pci_msi_setmsiq()
1897 * TRAP: HV_FAST_TRAP
1898 * FUNCTION: HV_FAST_PCI_MSI_SETMSIQ
1899 * ARG0: devhandle
1900 * ARG1: msinum
1901 * ARG2: msitype
1902 * ARG3: msiqid
1903 * RET0: status
1904 * ERRORS: EINVAL Invalid devhandle or msinum or msiqid
1905 *
1906 * Set the MSI EQ that the MSI defined by the given devhandle and
1907 * msinum is bound to.
1908 */
1909#define HV_FAST_PCI_MSI_SETMSIQ 0xcc
1910
1911/* pci_msi_getstate()
1912 * TRAP: HV_FAST_TRAP
1913 * FUNCTION: HV_FAST_PCI_MSI_GETSTATE
1914 * ARG0: devhandle
1915 * ARG1: msinum
1916 * RET0: status
1917 * RET1: msistate
1918 * ERRORS: EINVAL Invalid devhandle or msinum
1919 *
1920 * Get the state of the MSI defined by the given devhandle and msinum.
1921 * If not initialized, return HV_MSISTATE_IDLE.
1922 */
1923#define HV_FAST_PCI_MSI_GETSTATE 0xcd
1924
1925/* pci_msi_setstate()
1926 * TRAP: HV_FAST_TRAP
1927 * FUNCTION: HV_FAST_PCI_MSI_SETSTATE
1928 * ARG0: devhandle
1929 * ARG1: msinum
1930 * ARG2: msistate
1931 * RET0: status
1932 * ERRORS: EINVAL Invalid devhandle or msinum or msistate
1933 *
1934 * Set the state of the MSI defined by the given devhandle and msinum.
1935 */
1936#define HV_FAST_PCI_MSI_SETSTATE 0xce
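/* Illustrative only: the setup order implied by the MSI calls above when
 * wiring one MSI vector to an event queue.  The example_* wrappers are
 * hypothetical stand-ins for the real calls; HV_MSIVALID_VALID is
 * assumed to be defined earlier in this header.
 */
#ifndef __ASSEMBLY__
extern long example_msi_setmsiq(unsigned long devhandle, unsigned long msinum,
				unsigned long msitype, unsigned long msiqid);
extern long example_msi_setstate(unsigned long devhandle, unsigned long msinum,
				 unsigned long msistate);
extern long example_msi_setvalid(unsigned long devhandle, unsigned long msinum,
				 unsigned long msivalidstate);

static inline long example_msi_setup(unsigned long devhandle, unsigned long msinum,
				     unsigned long msitype, unsigned long msiqid)
{
	long err;

	/* 1) Bind the MSI to an already configured and valid event queue. */
	err = example_msi_setmsiq(devhandle, msinum, msitype, msiqid);
	if (err)
		return err;

	/* 2) Start from the idle state... */
	err = example_msi_setstate(devhandle, msinum, HV_MSISTATE_IDLE);
	if (err)
		return err;

	/* 3) ...then enable delivery. */
	return example_msi_setvalid(devhandle, msinum, HV_MSIVALID_VALID);
}
#endif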
1937
1938/* pci_msg_getmsiq()
1939 * TRAP: HV_FAST_TRAP
1940 * FUNCTION: HV_FAST_PCI_MSG_GETMSIQ
1941 * ARG0: devhandle
1942 * ARG1: msgtype
1943 * RET0: status
1944 * RET1: msiqid
1945 * ERRORS: EINVAL Invalid devhandle or msgtype
1946 *
1947 * Get the MSI EQ of the MSG defined by the given devhandle and msgtype.
1948 */
1949#define HV_FAST_PCI_MSG_GETMSIQ 0xd0
1950
1951/* pci_msg_setmsiq()
1952 * TRAP: HV_FAST_TRAP
1953 * FUNCTION: HV_FAST_PCI_MSG_SETMSIQ
1954 * ARG0: devhandle
1955 * ARG1: msgtype
1956 * ARG2: msiqid
1957 * RET0: status
1958 * ERRORS: EINVAL Invalid devhandle, msgtype, or msiqid
1959 *
1960 * Set the MSI EQ of the MSG defined by the given devhandle and msgtype.
1961 */
1962#define HV_FAST_PCI_MSG_SETMSIQ 0xd1
1963
1964/* pci_msg_getvalid()
1965 * TRAP: HV_FAST_TRAP
1966 * FUNCTION: HV_FAST_PCI_MSG_GETVALID
1967 * ARG0: devhandle
1968 * ARG1: msgtype
1969 * RET0: status
1970 * RET1: msgvalidstate
1971 * ERRORS: EINVAL Invalid devhandle or msgtype
1972 *
1973 * Get the valid/enabled state of the MSG defined by the given
1974 * devhandle and msgtype.
1975 */
1976#define HV_FAST_PCI_MSG_GETVALID 0xd2
1977
1978/* pci_msg_setvalid()
1979 * TRAP: HV_FAST_TRAP
1980 * FUNCTION: HV_FAST_PCI_MSG_SETVALID
1981 * ARG0: devhandle
1982 * ARG1: msgtype
1983 * ARG2: msgvalidstate
1984 * RET0: status
1985 * ERRORS: EINVAL Invalid devhandle or msgtype or msgvalidstate
1986 *
1987 * Set the valid/enabled state of the MSG defined by the given
1988 * devhandle and msgtype.
1989 */
1990#define HV_FAST_PCI_MSG_SETVALID 0xd3
1991
1992/* Performance counter services. */
1993
1994#define HV_PERF_JBUS_PERF_CTRL_REG 0x00
1995#define HV_PERF_JBUS_PERF_CNT_REG 0x01
1996#define HV_PERF_DRAM_PERF_CTRL_REG_0 0x02
1997#define HV_PERF_DRAM_PERF_CNT_REG_0 0x03
1998#define HV_PERF_DRAM_PERF_CTRL_REG_1 0x04
1999#define HV_PERF_DRAM_PERF_CNT_REG_1 0x05
2000#define HV_PERF_DRAM_PERF_CTRL_REG_2 0x06
2001#define HV_PERF_DRAM_PERF_CNT_REG_2 0x07
2002#define HV_PERF_DRAM_PERF_CTRL_REG_3 0x08
2003#define HV_PERF_DRAM_PERF_CNT_REG_3 0x09
2004
2005/* get_perfreg()
2006 * TRAP: HV_FAST_TRAP
2007 * FUNCTION: HV_FAST_GET_PERFREG
2008 * ARG0: performance reg number
2009 * RET0: status
2010 * RET1: performance reg value
2011 * ERRORS: EINVAL Invalid performance register number
2012 * ENOACCESS No access allowed to performance counters
2013 *
2014 * Read the value of the given DRAM/JBUS performance counter/control register.
2015 */
2016#define HV_FAST_GET_PERFREG 0x100
2017
2018/* set_perfreg()
2019 * TRAP: HV_FAST_TRAP
2020 * FUNCTION: HV_FAST_SET_PERFREG
2021 * ARG0: performance reg number
2022 * ARG1: performance reg value
2023 * RET0: status
2024 * ERRORS: EINVAL Invalid performance register number
2025 * ENOACCESS No access allowed to performance counters
2026 *
2027 * Write the given performance reg value to the given DRAM/JBUS
2028 * performance counter/control register.
2029 */
2030#define HV_FAST_SET_PERFREG 0x101
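/* Illustrative only: a hedged sketch of reading one of these registers
 * from C, assuming the usual sun4v fast-trap convention (function number
 * in %o5, argument and status in %o0, value back in %o1).
 */
#ifndef __ASSEMBLY__
static inline unsigned long example_get_perfreg(unsigned long reg,
						unsigned long *val)
{
	register unsigned long func asm("o5") = HV_FAST_GET_PERFREG;
	register unsigned long o0 asm("o0") = reg;
	register unsigned long o1 asm("o1") = 0;

	__asm__ __volatile__("ta %3"
			     : "+r" (o0), "+r" (o1)
			     : "r" (func), "i" (HV_FAST_TRAP)
			     : "memory");
	*val = o1;	/* register value, only meaningful on success */
	return o0;	/* status, zero on success */
}
#endif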
2031
2032/* MMU statistics services.
2033 *
2034 * The hypervisor maintains MMU statistics and privileged code provides
2035 * a buffer where these statistics can be collected. The buffer is
2036 * updated continually once configured. The layout is as follows:
2037 */
2038#ifndef __ASSEMBLY__
2039struct hv_mmu_statistics {
2040 unsigned long immu_tsb_hits_ctx0_8k_tte;
2041 unsigned long immu_tsb_ticks_ctx0_8k_tte;
2042 unsigned long immu_tsb_hits_ctx0_64k_tte;
2043 unsigned long immu_tsb_ticks_ctx0_64k_tte;
2044 unsigned long __reserved1[2];
2045 unsigned long immu_tsb_hits_ctx0_4mb_tte;
2046 unsigned long immu_tsb_ticks_ctx0_4mb_tte;
2047 unsigned long __reserved2[2];
2048 unsigned long immu_tsb_hits_ctx0_256mb_tte;
2049 unsigned long immu_tsb_ticks_ctx0_256mb_tte;
2050 unsigned long __reserved3[4];
2051 unsigned long immu_tsb_hits_ctxnon0_8k_tte;
2052 unsigned long immu_tsb_ticks_ctxnon0_8k_tte;
2053 unsigned long immu_tsb_hits_ctxnon0_64k_tte;
2054 unsigned long immu_tsb_ticks_ctxnon0_64k_tte;
2055 unsigned long __reserved4[2];
2056 unsigned long immu_tsb_hits_ctxnon0_4mb_tte;
2057 unsigned long immu_tsb_ticks_ctxnon0_4mb_tte;
2058 unsigned long __reserved5[2];
2059 unsigned long immu_tsb_hits_ctxnon0_256mb_tte;
2060 unsigned long immu_tsb_ticks_ctxnon0_256mb_tte;
2061 unsigned long __reserved6[4];
2062 unsigned long dmmu_tsb_hits_ctx0_8k_tte;
2063 unsigned long dmmu_tsb_ticks_ctx0_8k_tte;
2064 unsigned long dmmu_tsb_hits_ctx0_64k_tte;
2065 unsigned long dmmu_tsb_ticks_ctx0_64k_tte;
2066 unsigned long __reserved7[2];
2067 unsigned long dmmu_tsb_hits_ctx0_4mb_tte;
2068 unsigned long dmmu_tsb_ticks_ctx0_4mb_tte;
2069 unsigned long __reserved8[2];
2070 unsigned long dmmu_tsb_hits_ctx0_256mb_tte;
2071 unsigned long dmmu_tsb_ticks_ctx0_256mb_tte;
2072 unsigned long __reserved9[4];
2073 unsigned long dmmu_tsb_hits_ctxnon0_8k_tte;
2074 unsigned long dmmu_tsb_ticks_ctxnon0_8k_tte;
2075 unsigned long dmmu_tsb_hits_ctxnon0_64k_tte;
2076 unsigned long dmmu_tsb_ticks_ctxnon0_64k_tte;
2077 unsigned long __reserved10[2];
2078 unsigned long dmmu_tsb_hits_ctxnon0_4mb_tte;
2079 unsigned long dmmu_tsb_ticks_ctxnon0_4mb_tte;
2080 unsigned long __reserved11[2];
2081 unsigned long dmmu_tsb_hits_ctxnon0_256mb_tte;
2082 unsigned long dmmu_tsb_ticks_ctxnon0_256mb_tte;
2083 unsigned long __reserved12[4];
2084};
2085#endif
2086
2087/* mmustat_conf()
2088 * TRAP: HV_FAST_TRAP
2089 * FUNCTION: HV_FAST_MMUSTAT_CONF
2090 * ARG0: real address
2091 * RET0: status
2092 * RET1: real address
2093 * ERRORS: ENORADDR Invalid real address
2094 * EBADALIGN Real address not aligned on 64-byte boundary
2095 * EBADTRAP API not supported on this processor
2096 *
2097 * Enable MMU statistic gathering using the buffer at the given real
2098 * address on the current virtual CPU. The new buffer real address
2099 * is given in ARG0, and the previously specified buffer real address
2100 * is returned in RET1, or is returned as zero for the first invocation.
2101 *
2102 * If the passed in real address argument is zero, this will disable
2103 * MMU statistic collection on the current virtual CPU. If an error is
2104 * returned then no statistics are collected.
2105 *
2106 * The buffer contents should be initialized to all zeros before being
2107 * given to the hypervisor or else the statistics will be meaningless.
2108 */
2109#define HV_FAST_MMUSTAT_CONF 0x102
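/* Illustrative only: enabling collection into a zeroed, 64-byte aligned
 * buffer.  __pa() (from asm/page.h) and the %o5 fast-trap convention are
 * assumptions of this sketch; error handling is trimmed.
 */
#ifndef __ASSEMBLY__
static inline unsigned long example_mmustat_enable(struct hv_mmu_statistics *buf,
						   unsigned long *prev_ra)
{
	register unsigned long func asm("o5") = HV_FAST_MMUSTAT_CONF;
	register unsigned long o0 asm("o0") = __pa(buf);	/* buffer real address */
	register unsigned long o1 asm("o1") = 0;

	__asm__ __volatile__("ta %3"
			     : "+r" (o0), "+r" (o1)
			     : "r" (func), "i" (HV_FAST_TRAP)
			     : "memory");
	if (prev_ra)
		*prev_ra = o1;	/* previous buffer, zero on the first call */
	return o0;		/* status */
}
#endif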
2110
2111/* mmustat_info()
2112 * TRAP: HV_FAST_TRAP
2113 * FUNCTION: HV_FAST_MMUSTAT_INFO
2114 * RET0: status
2115 * RET1: real address
2116 * ERRORS: EBADTRAP API not supported on this processor
2117 *
2118 * Return the current state and real address of the currently configured
2119 * MMU statistics buffer on the current virtual CPU.
2120 */
2121#define HV_FAST_MMUSTAT_INFO 0x103
2122
2123/* Function numbers for HV_CORE_TRAP. */
2124#define HV_CORE_VER 0x00
2125#define HV_CORE_PUTCHAR 0x01
2126#define HV_CORE_EXIT 0x02
2127
2128#endif /* !(_SPARC64_HYPERVISOR_H) */
diff --git a/include/asm-sparc64/idprom.h b/include/asm-sparc64/idprom.h
index 701483c5465d..77fbf987385f 100644
--- a/include/asm-sparc64/idprom.h
+++ b/include/asm-sparc64/idprom.h
@@ -9,15 +9,7 @@
9 9
10#include <linux/types.h> 10#include <linux/types.h>
11 11
12/* Offset into the EEPROM where the id PROM is located on the 4c */ 12struct idprom {
13#define IDPROM_OFFSET 0x7d8
14
15/* On sun4m; physical. */
16/* MicroSPARC(-II) does not decode 31st bit, but it works. */
17#define IDPROM_OFFSET_M 0xfd8
18
19struct idprom
20{
21 u8 id_format; /* Format identifier (always 0x01) */ 13 u8 id_format; /* Format identifier (always 0x01) */
22 u8 id_machtype; /* Machine type */ 14 u8 id_machtype; /* Machine type */
23 u8 id_ethaddr[6]; /* Hardware ethernet address */ 15 u8 id_ethaddr[6]; /* Hardware ethernet address */
@@ -30,6 +22,4 @@ struct idprom
30extern struct idprom *idprom; 22extern struct idprom *idprom;
31extern void idprom_init(void); 23extern void idprom_init(void);
32 24
33#define IDPROM_SIZE (sizeof(struct idprom))
34
35#endif /* !(_SPARC_IDPROM_H) */ 25#endif /* !(_SPARC_IDPROM_H) */
diff --git a/include/asm-sparc64/intr_queue.h b/include/asm-sparc64/intr_queue.h
new file mode 100644
index 000000000000..206077dedc2a
--- /dev/null
+++ b/include/asm-sparc64/intr_queue.h
@@ -0,0 +1,15 @@
1#ifndef _SPARC64_INTR_QUEUE_H
2#define _SPARC64_INTR_QUEUE_H
3
4/* Sun4v interrupt queue registers, accessed via ASI_QUEUE. */
5
6#define INTRQ_CPU_MONDO_HEAD 0x3c0 /* CPU mondo head */
7#define INTRQ_CPU_MONDO_TAIL 0x3c8 /* CPU mondo tail */
8#define INTRQ_DEVICE_MONDO_HEAD 0x3d0 /* Device mondo head */
9#define INTRQ_DEVICE_MONDO_TAIL 0x3d8 /* Device mondo tail */
10#define INTRQ_RESUM_MONDO_HEAD 0x3e0 /* Resumable error mondo head */
11#define INTRQ_RESUM_MONDO_TAIL 0x3e8 /* Resumable error mondo tail */
12#define INTRQ_NONRESUM_MONDO_HEAD 0x3f0 /* Non-resumable error mondo head */
13#define INTRQ_NONRESUM_MONDO_TAIL 0x3f8 /* Non-resumable error mondo tail */
14
15#endif /* !(_SPARC64_INTR_QUEUE_H) */
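For illustration, a hedged sketch of how privileged code might read one of these per-cpu registers; it assumes ASI_QUEUE is provided by asm/asi.h and that the offsets are used with ldxa, as the sun4v interrupt vector code does:

#include <asm/asi.h>
#include <asm/intr_queue.h>

static inline unsigned long example_read_cpu_mondo_head(void)
{
	unsigned long head;

	/* Read the per-cpu CPU mondo head register via the queue ASI. */
	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (head)
			     : "r" (INTRQ_CPU_MONDO_HEAD), "i" (ASI_QUEUE));
	return head;
}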
diff --git a/include/asm-sparc64/irq.h b/include/asm-sparc64/irq.h
index 8b70edcb80dc..de33d6e1afb5 100644
--- a/include/asm-sparc64/irq.h
+++ b/include/asm-sparc64/irq.h
@@ -72,8 +72,11 @@ struct ino_bucket {
72#define IMAP_VALID 0x80000000 /* IRQ Enabled */ 72#define IMAP_VALID 0x80000000 /* IRQ Enabled */
73#define IMAP_TID_UPA 0x7c000000 /* UPA TargetID */ 73#define IMAP_TID_UPA 0x7c000000 /* UPA TargetID */
74#define IMAP_TID_JBUS 0x7c000000 /* JBUS TargetID */ 74#define IMAP_TID_JBUS 0x7c000000 /* JBUS TargetID */
75#define IMAP_TID_SHIFT 26
75#define IMAP_AID_SAFARI 0x7c000000 /* Safari AgentID */ 76#define IMAP_AID_SAFARI 0x7c000000 /* Safari AgentID */
77#define IMAP_AID_SHIFT 26
76#define IMAP_NID_SAFARI 0x03e00000 /* Safari NodeID */ 78#define IMAP_NID_SAFARI 0x03e00000 /* Safari NodeID */
79#define IMAP_NID_SHIFT 21
77#define IMAP_IGN 0x000007c0 /* IRQ Group Number */ 80#define IMAP_IGN 0x000007c0 /* IRQ Group Number */
78#define IMAP_INO 0x0000003f /* IRQ Number */ 81#define IMAP_INO 0x0000003f /* IRQ Number */
79#define IMAP_INR 0x000007ff /* Full interrupt number*/ 82#define IMAP_INR 0x000007ff /* Full interrupt number*/
@@ -111,6 +114,7 @@ extern void disable_irq(unsigned int);
111#define disable_irq_nosync disable_irq 114#define disable_irq_nosync disable_irq
112extern void enable_irq(unsigned int); 115extern void enable_irq(unsigned int);
113extern unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap); 116extern unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap);
117extern unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, int pil, unsigned char flags);
114extern unsigned int sbus_build_irq(void *sbus, unsigned int ino); 118extern unsigned int sbus_build_irq(void *sbus, unsigned int ino);
115 119
116static __inline__ void set_softint(unsigned long bits) 120static __inline__ void set_softint(unsigned long bits)
diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h
index 8627eed6e83d..230ba678d3b0 100644
--- a/include/asm-sparc64/mmu.h
+++ b/include/asm-sparc64/mmu.h
@@ -4,20 +4,9 @@
4#include <linux/config.h> 4#include <linux/config.h>
5#include <asm/page.h> 5#include <asm/page.h>
6#include <asm/const.h> 6#include <asm/const.h>
7#include <asm/hypervisor.h>
7 8
8/* 9#define CTX_NR_BITS 13
9 * For the 8k pagesize kernel, use only 10 hw context bits to optimize some
10 * shifts in the fast tlbmiss handlers, instead of all 13 bits (specifically
11 * for vpte offset calculation). For other pagesizes, this optimization in
12 * the tlbhandlers can not be done; but still, all 13 bits can not be used
13 * because the tlb handlers use "andcc" instruction which sign extends 13
14 * bit arguments.
15 */
16#if PAGE_SHIFT == 13
17#define CTX_NR_BITS 10
18#else
19#define CTX_NR_BITS 12
20#endif
21 10
22#define TAG_CONTEXT_BITS ((_AC(1,UL) << CTX_NR_BITS) - _AC(1,UL)) 11#define TAG_CONTEXT_BITS ((_AC(1,UL) << CTX_NR_BITS) - _AC(1,UL))
23 12
@@ -90,8 +79,27 @@
90 79
91#ifndef __ASSEMBLY__ 80#ifndef __ASSEMBLY__
92 81
82#define TSB_ENTRY_ALIGNMENT 16
83
84struct tsb {
85 unsigned long tag;
86 unsigned long pte;
87} __attribute__((aligned(TSB_ENTRY_ALIGNMENT)));
88
89extern void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte);
90extern void tsb_flush(unsigned long ent, unsigned long tag);
91extern void tsb_init(struct tsb *tsb, unsigned long size);
92
93typedef struct { 93typedef struct {
94 unsigned long sparc64_ctx_val; 94 spinlock_t lock;
95 unsigned long sparc64_ctx_val;
96 struct tsb *tsb;
97 unsigned long tsb_rss_limit;
98 unsigned long tsb_nentries;
99 unsigned long tsb_reg_val;
100 unsigned long tsb_map_vaddr;
101 unsigned long tsb_map_pte;
102 struct hv_tsb_descr tsb_descr;
95} mm_context_t; 103} mm_context_t;
96 104
97#endif /* !__ASSEMBLY__ */ 105#endif /* !__ASSEMBLY__ */
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 57ee7b306189..e7974321d052 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -19,96 +19,98 @@ extern unsigned long tlb_context_cache;
19extern unsigned long mmu_context_bmap[]; 19extern unsigned long mmu_context_bmap[];
20 20
21extern void get_new_mmu_context(struct mm_struct *mm); 21extern void get_new_mmu_context(struct mm_struct *mm);
22#ifdef CONFIG_SMP
23extern void smp_new_mmu_context_version(void);
24#else
25#define smp_new_mmu_context_version() do { } while (0)
26#endif
27
28extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
29extern void destroy_context(struct mm_struct *mm);
30
31extern void __tsb_context_switch(unsigned long pgd_pa,
32 unsigned long tsb_reg,
33 unsigned long tsb_vaddr,
34 unsigned long tsb_pte,
35 unsigned long tsb_descr_pa);
36
37static inline void tsb_context_switch(struct mm_struct *mm)
38{
39 __tsb_context_switch(__pa(mm->pgd), mm->context.tsb_reg_val,
40 mm->context.tsb_map_vaddr,
41 mm->context.tsb_map_pte,
42 __pa(&mm->context.tsb_descr));
43}
22 44
23/* Initialize a new mmu context. This is invoked when a new 45extern void tsb_grow(struct mm_struct *mm, unsigned long mm_rss);
24 * address space instance (unique or shared) is instantiated. 46#ifdef CONFIG_SMP
25 * This just needs to set mm->context to an invalid context. 47extern void smp_tsb_sync(struct mm_struct *mm);
26 */ 48#else
27#define init_new_context(__tsk, __mm) \ 49#define smp_tsb_sync(__mm) do { } while (0)
28 (((__mm)->context.sparc64_ctx_val = 0UL), 0) 50#endif
29
30/* Destroy a dead context. This occurs when mmput drops the
31 * mm_users count to zero, the mmaps have been released, and
32 * all the page tables have been flushed. Our job is to destroy
33 * any remaining processor-specific state, and in the sparc64
34 * case this just means freeing up the mmu context ID held by
35 * this task if valid.
36 */
37#define destroy_context(__mm) \
38do { spin_lock(&ctx_alloc_lock); \
39 if (CTX_VALID((__mm)->context)) { \
40 unsigned long nr = CTX_NRBITS((__mm)->context); \
41 mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63)); \
42 } \
43 spin_unlock(&ctx_alloc_lock); \
44} while(0)
45
46/* Reload the two core values used by TLB miss handler
47 * processing on sparc64. They are:
48 * 1) The physical address of mm->pgd, when full page
49 * table walks are necessary, this is where the
50 * search begins.
51 * 2) A "PGD cache". For 32-bit tasks only pgd[0] is
52 * ever used since that maps the entire low 4GB
53 * completely. To speed up TLB miss processing we
54 * make this value available to the handlers. This
55 * decreases the amount of memory traffic incurred.
56 */
57#define reload_tlbmiss_state(__tsk, __mm) \
58do { \
59 register unsigned long paddr asm("o5"); \
60 register unsigned long pgd_cache asm("o4"); \
61 paddr = __pa((__mm)->pgd); \
62 pgd_cache = 0UL; \
63 if (task_thread_info(__tsk)->flags & _TIF_32BIT) \
64 pgd_cache = get_pgd_cache((__mm)->pgd); \
65 __asm__ __volatile__("wrpr %%g0, 0x494, %%pstate\n\t" \
66 "mov %3, %%g4\n\t" \
67 "mov %0, %%g7\n\t" \
68 "stxa %1, [%%g4] %2\n\t" \
69 "membar #Sync\n\t" \
70 "wrpr %%g0, 0x096, %%pstate" \
71 : /* no outputs */ \
72 : "r" (paddr), "r" (pgd_cache),\
73 "i" (ASI_DMMU), "i" (TSB_REG)); \
74} while(0)
75 51
76/* Set MMU context in the actual hardware. */ 52/* Set MMU context in the actual hardware. */
77#define load_secondary_context(__mm) \ 53#define load_secondary_context(__mm) \
78 __asm__ __volatile__("stxa %0, [%1] %2\n\t" \ 54 __asm__ __volatile__( \
79 "flush %%g6" \ 55 "\n661: stxa %0, [%1] %2\n" \
80 : /* No outputs */ \ 56 " .section .sun4v_1insn_patch, \"ax\"\n" \
81 : "r" (CTX_HWBITS((__mm)->context)), \ 57 " .word 661b\n" \
82 "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU)) 58 " stxa %0, [%1] %3\n" \
59 " .previous\n" \
60 " flush %%g6\n" \
61 : /* No outputs */ \
62 : "r" (CTX_HWBITS((__mm)->context)), \
63 "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU), "i" (ASI_MMU))
83 64
84extern void __flush_tlb_mm(unsigned long, unsigned long); 65extern void __flush_tlb_mm(unsigned long, unsigned long);
85 66
86/* Switch the current MM context. */ 67/* Switch the current MM context. Interrupts are disabled. */
87static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk) 68static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
88{ 69{
89 unsigned long ctx_valid; 70 unsigned long ctx_valid, flags;
90 int cpu; 71 int cpu;
91 72
92 /* Note: page_table_lock is used here to serialize switch_mm 73 spin_lock_irqsave(&mm->context.lock, flags);
93 * and activate_mm, and their calls to get_new_mmu_context.
94 * This use of page_table_lock is unrelated to its other uses.
95 */
96 spin_lock(&mm->page_table_lock);
97 ctx_valid = CTX_VALID(mm->context); 74 ctx_valid = CTX_VALID(mm->context);
98 if (!ctx_valid) 75 if (!ctx_valid)
99 get_new_mmu_context(mm); 76 get_new_mmu_context(mm);
100 spin_unlock(&mm->page_table_lock);
101 77
102 if (!ctx_valid || (old_mm != mm)) { 78 /* We have to be extremely careful here or else we will miss
103 load_secondary_context(mm); 79 * a TSB grow if we switch back and forth between a kernel
104 reload_tlbmiss_state(tsk, mm); 80 * thread and an address space which has its TSB size increased
105 } 81 * on another processor.
82 *
83 * It is possible to play some games in order to optimize the
84 * switch, but the safest thing to do is to unconditionally
85 * perform the secondary context load and the TSB context switch.
86 *
87 * For reference the bad case is, for address space "A":
88 *
89 * CPU 0 CPU 1
90 * run address space A
91 * set cpu0's bits in cpu_vm_mask
92 * switch to kernel thread, borrow
93 * address space A via entry_lazy_tlb
94 * run address space A
95 * set cpu1's bit in cpu_vm_mask
96 * flush_tlb_pending()
97 * reset cpu_vm_mask to just cpu1
98 * TSB grow
99 * run address space A
100 * context was valid, so skip
101 * TSB context switch
102 *
103 * At that point cpu0 continues to use a stale TSB, the one from
104 * before the TSB grow performed on cpu1. cpu1 did not cross-call
105 * cpu0 to update its TSB because at that point the cpu_vm_mask
106 * only had cpu1 set in it.
107 */
108 load_secondary_context(mm);
109 tsb_context_switch(mm);
106 110
107 /* Even if (mm == old_mm) we _must_ check 111 /* Any time a processor runs a context on an address space
108 * the cpu_vm_mask. If we do not we could 112 * for the first time, we must flush that context out of the
109 * corrupt the TLB state because of how 113 * local TLB.
110 * smp_flush_tlb_{page,range,mm} on sparc64
111 * and lazy tlb switches work. -DaveM
112 */ 114 */
113 cpu = smp_processor_id(); 115 cpu = smp_processor_id();
114 if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) { 116 if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) {
@@ -116,6 +118,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
116 __flush_tlb_mm(CTX_HWBITS(mm->context), 118 __flush_tlb_mm(CTX_HWBITS(mm->context),
117 SECONDARY_CONTEXT); 119 SECONDARY_CONTEXT);
118 } 120 }
121 spin_unlock_irqrestore(&mm->context.lock, flags);
119} 122}
120 123
121#define deactivate_mm(tsk,mm) do { } while (0) 124#define deactivate_mm(tsk,mm) do { } while (0)
@@ -123,23 +126,20 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
123/* Activate a new MM instance for the current task. */ 126/* Activate a new MM instance for the current task. */
124static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm) 127static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
125{ 128{
129 unsigned long flags;
126 int cpu; 130 int cpu;
127 131
128 /* Note: page_table_lock is used here to serialize switch_mm 132 spin_lock_irqsave(&mm->context.lock, flags);
129 * and activate_mm, and their calls to get_new_mmu_context.
130 * This use of page_table_lock is unrelated to its other uses.
131 */
132 spin_lock(&mm->page_table_lock);
133 if (!CTX_VALID(mm->context)) 133 if (!CTX_VALID(mm->context))
134 get_new_mmu_context(mm); 134 get_new_mmu_context(mm);
135 cpu = smp_processor_id(); 135 cpu = smp_processor_id();
136 if (!cpu_isset(cpu, mm->cpu_vm_mask)) 136 if (!cpu_isset(cpu, mm->cpu_vm_mask))
137 cpu_set(cpu, mm->cpu_vm_mask); 137 cpu_set(cpu, mm->cpu_vm_mask);
138 spin_unlock(&mm->page_table_lock);
139 138
140 load_secondary_context(mm); 139 load_secondary_context(mm);
141 __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT); 140 __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
142 reload_tlbmiss_state(current, mm); 141 tsb_context_switch(mm);
142 spin_unlock_irqrestore(&mm->context.lock, flags);
143} 143}
144 144
145#endif /* !(__ASSEMBLY__) */ 145#endif /* !(__ASSEMBLY__) */
diff --git a/include/asm-sparc64/numnodes.h b/include/asm-sparc64/numnodes.h
new file mode 100644
index 000000000000..017e7e74f5e7
--- /dev/null
+++ b/include/asm-sparc64/numnodes.h
@@ -0,0 +1,6 @@
1#ifndef _SPARC64_NUMNODES_H
2#define _SPARC64_NUMNODES_H
3
4#define NODES_SHIFT 0
5
6#endif /* !(_SPARC64_NUMNODES_H) */
diff --git a/include/asm-sparc64/oplib.h b/include/asm-sparc64/oplib.h
index 3c59b2693fb9..c754676e13ef 100644
--- a/include/asm-sparc64/oplib.h
+++ b/include/asm-sparc64/oplib.h
@@ -12,18 +12,8 @@
12#include <linux/config.h> 12#include <linux/config.h>
13#include <asm/openprom.h> 13#include <asm/openprom.h>
14 14
15/* Enumeration to describe the prom major version we have detected. */ 15/* OBP version string. */
16enum prom_major_version { 16extern char prom_version[];
17 PROM_V0, /* Original sun4c V0 prom */
18 PROM_V2, /* sun4c and early sun4m V2 prom */
19 PROM_V3, /* sun4m and later, up to sun4d/sun4e machines V3 */
20 PROM_P1275, /* IEEE compliant ISA based Sun PROM, only sun4u */
21 PROM_AP1000, /* actually no prom at all */
22};
23
24extern enum prom_major_version prom_vers;
25/* Revision, and firmware revision. */
26extern unsigned int prom_rev, prom_prev;
27 17
28/* Root node of the prom device tree, this stays constant after 18/* Root node of the prom device tree, this stays constant after
29 * initialization is complete. 19 * initialization is complete.
@@ -39,6 +29,9 @@ extern int prom_stdin, prom_stdout;
39extern int prom_chosen_node; 29extern int prom_chosen_node;
40 30
41/* Helper values and strings in arch/sparc64/kernel/head.S */ 31/* Helper values and strings in arch/sparc64/kernel/head.S */
32extern const char prom_peer_name[];
33extern const char prom_compatible_name[];
34extern const char prom_root_compatible[];
42extern const char prom_finddev_name[]; 35extern const char prom_finddev_name[];
43extern const char prom_chosen_path[]; 36extern const char prom_chosen_path[];
44extern const char prom_getprop_name[]; 37extern const char prom_getprop_name[];
@@ -130,15 +123,6 @@ extern void prom_setcallback(callback_func_t func_ptr);
130 */ 123 */
131extern unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size); 124extern unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size);
132 125
133/* Get the prom major version. */
134extern int prom_version(void);
135
136/* Get the prom plugin revision. */
137extern int prom_getrev(void);
138
139/* Get the prom firmware revision. */
140extern int prom_getprev(void);
141
142/* Character operations to/from the console.... */ 126/* Character operations to/from the console.... */
143 127
144/* Non-blocking get character from console. */ 128/* Non-blocking get character from console. */
@@ -164,6 +148,7 @@ enum prom_input_device {
164 PROMDEV_ITTYA, /* input from ttya */ 148 PROMDEV_ITTYA, /* input from ttya */
165 PROMDEV_ITTYB, /* input from ttyb */ 149 PROMDEV_ITTYB, /* input from ttyb */
166 PROMDEV_IRSC, /* input from rsc */ 150 PROMDEV_IRSC, /* input from rsc */
151 PROMDEV_IVCONS, /* input from virtual-console */
167 PROMDEV_I_UNK, 152 PROMDEV_I_UNK,
168}; 153};
169 154
@@ -176,6 +161,7 @@ enum prom_output_device {
176 PROMDEV_OTTYA, /* to ttya */ 161 PROMDEV_OTTYA, /* to ttya */
177 PROMDEV_OTTYB, /* to ttyb */ 162 PROMDEV_OTTYB, /* to ttyb */
178 PROMDEV_ORSC, /* to rsc */ 163 PROMDEV_ORSC, /* to rsc */
164 PROMDEV_OVCONS, /* to virtual-console */
179 PROMDEV_O_UNK, 165 PROMDEV_O_UNK,
180}; 166};
181 167
@@ -183,10 +169,18 @@ extern enum prom_output_device prom_query_output_device(void);
183 169
184/* Multiprocessor operations... */ 170/* Multiprocessor operations... */
185#ifdef CONFIG_SMP 171#ifdef CONFIG_SMP
186/* Start the CPU with the given device tree node, context table, and context 172/* Start the CPU with the given device tree node at the passed program
187 * at the passed program counter. 173 * counter with the given arg passed in via register %o0.
174 */
175extern void prom_startcpu(int cpunode, unsigned long pc, unsigned long arg);
176
177/* Start the CPU with the given cpu ID at the passed program
178 * counter with the given arg passed in via register %o0.
188 */ 179 */
189extern void prom_startcpu(int cpunode, unsigned long pc, unsigned long o0); 180extern void prom_startcpu_cpuid(int cpuid, unsigned long pc, unsigned long arg);
181
182/* Stop the CPU with the given cpu ID. */
183extern void prom_stopcpu_cpuid(int cpuid);
190 184
191/* Stop the current CPU. */ 185/* Stop the current CPU. */
192extern void prom_stopself(void); 186extern void prom_stopself(void);
@@ -335,6 +329,7 @@ int cpu_find_by_mid(int mid, int *prom_node);
335 329
336/* Client interface level routines. */ 330/* Client interface level routines. */
337extern void prom_set_trap_table(unsigned long tba); 331extern void prom_set_trap_table(unsigned long tba);
332extern void prom_set_trap_table_sun4v(unsigned long tba, unsigned long mmfsa);
338 333
339extern long p1275_cmd(const char *, long, ...); 334extern long p1275_cmd(const char *, long, ...);
340 335
diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h
index 5426bb28a993..fcb2812265f4 100644
--- a/include/asm-sparc64/page.h
+++ b/include/asm-sparc64/page.h
@@ -104,10 +104,12 @@ typedef unsigned long pgprot_t;
104#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 104#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
105#define ARCH_HAS_SETCLEAR_HUGE_PTE 105#define ARCH_HAS_SETCLEAR_HUGE_PTE
106#define ARCH_HAS_HUGETLB_PREFAULT_HOOK 106#define ARCH_HAS_HUGETLB_PREFAULT_HOOK
107#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
107#endif 108#endif
108 109
109#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_32BIT) ? \ 110#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_32BIT) ? \
110 (_AC(0x0000000070000000,UL)) : (PAGE_OFFSET)) 111 (_AC(0x0000000070000000,UL)) : \
112 (_AC(0xfffff80000000000,UL) + (1UL << 32UL)))
111 113
112#endif /* !(__ASSEMBLY__) */ 114#endif /* !(__ASSEMBLY__) */
113 115
@@ -124,17 +126,10 @@ typedef unsigned long pgprot_t;
124#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET) 126#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
125#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) 127#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
126 128
127/* PFNs are real physical page numbers. However, mem_map only begins to record 129#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
128 * per-page information starting at pfn_base. This is to handle systems where
129 * the first physical page in the machine is at some huge physical address,
130 * such as 4GB. This is common on a partitioned E10000, for example.
131 */
132extern struct page *pfn_to_page(unsigned long pfn);
133extern unsigned long page_to_pfn(struct page *);
134 130
135#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr)>>PAGE_SHIFT) 131#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr)>>PAGE_SHIFT)
136 132
137#define pfn_valid(pfn) (((pfn)-(pfn_base)) < max_mapnr)
138#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) 133#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
139 134
140#define virt_to_phys __pa 135#define virt_to_phys __pa
diff --git a/include/asm-sparc64/pbm.h b/include/asm-sparc64/pbm.h
index dd35a2c7798a..1396f110939a 100644
--- a/include/asm-sparc64/pbm.h
+++ b/include/asm-sparc64/pbm.h
@@ -139,6 +139,9 @@ struct pci_pbm_info {
139 /* Opaque 32-bit system bus Port ID. */ 139 /* Opaque 32-bit system bus Port ID. */
140 u32 portid; 140 u32 portid;
141 141
142 /* Opaque 32-bit handle used for hypervisor calls. */
143 u32 devhandle;
144
142 /* Chipset version information. */ 145 /* Chipset version information. */
143 int chip_type; 146 int chip_type;
144#define PBM_CHIP_TYPE_SABRE 1 147#define PBM_CHIP_TYPE_SABRE 1
diff --git a/include/asm-sparc64/pci.h b/include/asm-sparc64/pci.h
index 89bd71b1c0d8..7c5a589ea437 100644
--- a/include/asm-sparc64/pci.h
+++ b/include/asm-sparc64/pci.h
@@ -41,10 +41,26 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
41 41
42struct pci_dev; 42struct pci_dev;
43 43
44struct pci_iommu_ops {
45 void *(*alloc_consistent)(struct pci_dev *, size_t, dma_addr_t *);
46 void (*free_consistent)(struct pci_dev *, size_t, void *, dma_addr_t);
47 dma_addr_t (*map_single)(struct pci_dev *, void *, size_t, int);
48 void (*unmap_single)(struct pci_dev *, dma_addr_t, size_t, int);
49 int (*map_sg)(struct pci_dev *, struct scatterlist *, int, int);
50 void (*unmap_sg)(struct pci_dev *, struct scatterlist *, int, int);
51 void (*dma_sync_single_for_cpu)(struct pci_dev *, dma_addr_t, size_t, int);
52 void (*dma_sync_sg_for_cpu)(struct pci_dev *, struct scatterlist *, int, int);
53};
54
55extern struct pci_iommu_ops *pci_iommu_ops;
56
44/* Allocate and map kernel buffer using consistent mode DMA for a device. 57/* Allocate and map kernel buffer using consistent mode DMA for a device.
45 * hwdev should be valid struct pci_dev pointer for PCI devices. 58 * hwdev should be valid struct pci_dev pointer for PCI devices.
46 */ 59 */
47extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle); 60static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
61{
62 return pci_iommu_ops->alloc_consistent(hwdev, size, dma_handle);
63}
48 64
49/* Free and unmap a consistent DMA buffer. 65/* Free and unmap a consistent DMA buffer.
50 * cpu_addr is what was returned from pci_alloc_consistent, 66 * cpu_addr is what was returned from pci_alloc_consistent,
@@ -54,7 +70,10 @@ extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t
54 * References to the memory and mappings associated with cpu_addr/dma_addr 70 * References to the memory and mappings associated with cpu_addr/dma_addr
55 * past this call are illegal. 71 * past this call are illegal.
56 */ 72 */
57extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle); 73static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
74{
75 return pci_iommu_ops->free_consistent(hwdev, size, vaddr, dma_handle);
76}
58 77
59/* Map a single buffer of the indicated size for DMA in streaming mode. 78/* Map a single buffer of the indicated size for DMA in streaming mode.
60 * The 32-bit bus address to use is returned. 79 * The 32-bit bus address to use is returned.
@@ -62,7 +81,10 @@ extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr,
62 * Once the device is given the dma address, the device owns this memory 81 * Once the device is given the dma address, the device owns this memory
63 * until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed. 82 * until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed.
64 */ 83 */
65extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction); 84static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
85{
86 return pci_iommu_ops->map_single(hwdev, ptr, size, direction);
87}
66 88
67/* Unmap a single streaming mode DMA translation. The dma_addr and size 89/* Unmap a single streaming mode DMA translation. The dma_addr and size
68 * must match what was provided for in a previous pci_map_single call. All 90 * must match what was provided for in a previous pci_map_single call. All
@@ -71,7 +93,10 @@ extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
71 * After this call, reads by the cpu to the buffer are guaranteed to see 93 * After this call, reads by the cpu to the buffer are guaranteed to see
72 * whatever the device wrote there. 94 * whatever the device wrote there.
73 */ 95 */
74extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction); 96static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction)
97{
98 pci_iommu_ops->unmap_single(hwdev, dma_addr, size, direction);
99}
75 100
76/* No highmem on sparc64, plus we have an IOMMU, so mapping pages is easy. */ 101/* No highmem on sparc64, plus we have an IOMMU, so mapping pages is easy. */
77#define pci_map_page(dev, page, off, size, dir) \ 102#define pci_map_page(dev, page, off, size, dir) \
@@ -107,15 +132,19 @@ extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t
107 * Device ownership issues as mentioned above for pci_map_single are 132 * Device ownership issues as mentioned above for pci_map_single are
108 * the same here. 133 * the same here.
109 */ 134 */
110extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, 135static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
111 int nents, int direction); 136{
137 return pci_iommu_ops->map_sg(hwdev, sg, nents, direction);
138}
112 139
113/* Unmap a set of streaming mode DMA translations. 140/* Unmap a set of streaming mode DMA translations.
114 * Again, cpu read rules concerning calls here are the same as for 141 * Again, cpu read rules concerning calls here are the same as for
115 * pci_unmap_single() above. 142 * pci_unmap_single() above.
116 */ 143 */
117extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, 144static inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nhwents, int direction)
118 int nhwents, int direction); 145{
146 pci_iommu_ops->unmap_sg(hwdev, sg, nhwents, direction);
147}
119 148
120/* Make physical memory consistent for a single 149/* Make physical memory consistent for a single
121 * streaming mode DMA translation after a transfer. 150 * streaming mode DMA translation after a transfer.
@@ -127,8 +156,10 @@ extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
127 * must first perform a pci_dma_sync_for_device, and then the 156 * must first perform a pci_dma_sync_for_device, and then the
128 * device again owns the buffer. 157 * device again owns the buffer.
129 */ 158 */
130extern void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, 159static inline void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction)
131 size_t size, int direction); 160{
161 pci_iommu_ops->dma_sync_single_for_cpu(hwdev, dma_handle, size, direction);
162}
132 163
133static inline void 164static inline void
134pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, 165pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
@@ -144,7 +175,10 @@ pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
144 * The same as pci_dma_sync_single_* but for a scatter-gather list, 175 * The same as pci_dma_sync_single_* but for a scatter-gather list,
145 * same rules and usage. 176 * same rules and usage.
146 */ 177 */
147extern void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction); 178static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
179{
180 pci_iommu_ops->dma_sync_sg_for_cpu(hwdev, sg, nelems, direction);
181}
148 182
149static inline void 183static inline void
150pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, 184pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
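The pci_iommu_ops indirection introduced above lets the sun4u and sun4v controllers supply their own DMA implementations behind the unchanged pci_* wrappers. A hedged sketch of how a controller probe might install its table (all example_* names are hypothetical):

extern void *example_alloc_consistent(struct pci_dev *, size_t, dma_addr_t *);
extern void example_free_consistent(struct pci_dev *, size_t, void *, dma_addr_t);
extern dma_addr_t example_map_single(struct pci_dev *, void *, size_t, int);
extern void example_unmap_single(struct pci_dev *, dma_addr_t, size_t, int);

static struct pci_iommu_ops example_iommu_ops = {
	.alloc_consistent	= example_alloc_consistent,
	.free_consistent	= example_free_consistent,
	.map_single		= example_map_single,
	.unmap_single		= example_unmap_single,
	/* remaining members omitted in this sketch */
};

void example_pci_controller_probe(void)
{
	/* Install before any driver uses the pci_* DMA wrappers. */
	pci_iommu_ops = &example_iommu_ops;
}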
diff --git a/include/asm-sparc64/pgalloc.h b/include/asm-sparc64/pgalloc.h
index a96067cca963..12e4a273bd43 100644
--- a/include/asm-sparc64/pgalloc.h
+++ b/include/asm-sparc64/pgalloc.h
@@ -6,6 +6,7 @@
6#include <linux/kernel.h> 6#include <linux/kernel.h>
7#include <linux/sched.h> 7#include <linux/sched.h>
8#include <linux/mm.h> 8#include <linux/mm.h>
9#include <linux/slab.h>
9 10
10#include <asm/spitfire.h> 11#include <asm/spitfire.h>
11#include <asm/cpudata.h> 12#include <asm/cpudata.h>
@@ -13,172 +14,59 @@
13#include <asm/page.h> 14#include <asm/page.h>
14 15
15/* Page table allocation/freeing. */ 16/* Page table allocation/freeing. */
16#ifdef CONFIG_SMP 17extern kmem_cache_t *pgtable_cache;
17/* Sliiiicck */
18#define pgt_quicklists local_cpu_data()
19#else
20extern struct pgtable_cache_struct {
21 unsigned long *pgd_cache;
22 unsigned long *pte_cache[2];
23 unsigned int pgcache_size;
24} pgt_quicklists;
25#endif
26#define pgd_quicklist (pgt_quicklists.pgd_cache)
27#define pmd_quicklist ((unsigned long *)0)
28#define pte_quicklist (pgt_quicklists.pte_cache)
29#define pgtable_cache_size (pgt_quicklists.pgcache_size)
30 18
31static __inline__ void free_pgd_fast(pgd_t *pgd) 19static inline pgd_t *pgd_alloc(struct mm_struct *mm)
32{ 20{
33 preempt_disable(); 21 return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
34 *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
35 pgd_quicklist = (unsigned long *) pgd;
36 pgtable_cache_size++;
37 preempt_enable();
38} 22}
39 23
40static __inline__ pgd_t *get_pgd_fast(void) 24static inline void pgd_free(pgd_t *pgd)
41{ 25{
42 unsigned long *ret; 26 kmem_cache_free(pgtable_cache, pgd);
43
44 preempt_disable();
45 if((ret = pgd_quicklist) != NULL) {
46 pgd_quicklist = (unsigned long *)(*ret);
47 ret[0] = 0;
48 pgtable_cache_size--;
49 preempt_enable();
50 } else {
51 preempt_enable();
52 ret = (unsigned long *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
53 if(ret)
54 memset(ret, 0, PAGE_SIZE);
55 }
56 return (pgd_t *)ret;
57} 27}
58 28
59static __inline__ void free_pgd_slow(pgd_t *pgd)
60{
61 free_page((unsigned long)pgd);
62}
63
64#ifdef DCACHE_ALIASING_POSSIBLE
65#define VPTE_COLOR(address) (((address) >> (PAGE_SHIFT + 10)) & 1UL)
66#define DCACHE_COLOR(address) (((address) >> PAGE_SHIFT) & 1UL)
67#else
68#define VPTE_COLOR(address) 0
69#define DCACHE_COLOR(address) 0
70#endif
71
72#define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD) 29#define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
73 30
74static __inline__ pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address) 31static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
75{ 32{
76 unsigned long *ret; 33 return kmem_cache_alloc(pgtable_cache,
77 int color = 0; 34 GFP_KERNEL|__GFP_REPEAT);
78
79 preempt_disable();
80 if (pte_quicklist[color] == NULL)
81 color = 1;
82
83 if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
84 pte_quicklist[color] = (unsigned long *)(*ret);
85 ret[0] = 0;
86 pgtable_cache_size--;
87 }
88 preempt_enable();
89
90 return (pmd_t *)ret;
91} 35}
92 36
93static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) 37static inline void pmd_free(pmd_t *pmd)
94{ 38{
95 pmd_t *pmd; 39 kmem_cache_free(pgtable_cache, pmd);
96
97 pmd = pmd_alloc_one_fast(mm, address);
98 if (!pmd) {
99 pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
100 if (pmd)
101 memset(pmd, 0, PAGE_SIZE);
102 }
103 return pmd;
104} 40}
105 41
106static __inline__ void free_pmd_fast(pmd_t *pmd) 42static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
43 unsigned long address)
107{ 44{
108 unsigned long color = DCACHE_COLOR((unsigned long)pmd); 45 return kmem_cache_alloc(pgtable_cache,
109 46 GFP_KERNEL|__GFP_REPEAT);
110 preempt_disable();
111 *(unsigned long *)pmd = (unsigned long) pte_quicklist[color];
112 pte_quicklist[color] = (unsigned long *) pmd;
113 pgtable_cache_size++;
114 preempt_enable();
115} 47}
116 48
117static __inline__ void free_pmd_slow(pmd_t *pmd) 49static inline struct page *pte_alloc_one(struct mm_struct *mm,
50 unsigned long address)
118{ 51{
119 free_page((unsigned long)pmd); 52 return virt_to_page(pte_alloc_one_kernel(mm, address));
120} 53}
121 54
122#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE)
123#define pmd_populate(MM,PMD,PTE_PAGE) \
124 pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
125
126extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);
127
128static inline struct page *
129pte_alloc_one(struct mm_struct *mm, unsigned long addr)
130{
131 pte_t *pte = pte_alloc_one_kernel(mm, addr);
132
133 if (pte)
134 return virt_to_page(pte);
135
136 return NULL;
137}
138
139static __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
140{
141 unsigned long color = VPTE_COLOR(address);
142 unsigned long *ret;
143
144 preempt_disable();
145 if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
146 pte_quicklist[color] = (unsigned long *)(*ret);
147 ret[0] = 0;
148 pgtable_cache_size--;
149 }
150 preempt_enable();
151 return (pte_t *)ret;
152}
153
154static __inline__ void free_pte_fast(pte_t *pte)
155{
156 unsigned long color = DCACHE_COLOR((unsigned long)pte);
157
158 preempt_disable();
159 *(unsigned long *)pte = (unsigned long) pte_quicklist[color];
160 pte_quicklist[color] = (unsigned long *) pte;
161 pgtable_cache_size++;
162 preempt_enable();
163}
164
165static __inline__ void free_pte_slow(pte_t *pte)
166{
167 free_page((unsigned long)pte);
168}
169
170static inline void pte_free_kernel(pte_t *pte) 55static inline void pte_free_kernel(pte_t *pte)
171{ 56{
172 free_pte_fast(pte); 57 kmem_cache_free(pgtable_cache, pte);
173} 58}
174 59
175static inline void pte_free(struct page *ptepage) 60static inline void pte_free(struct page *ptepage)
176{ 61{
177 free_pte_fast(page_address(ptepage)); 62 pte_free_kernel(page_address(ptepage));
178} 63}
179 64
180#define pmd_free(pmd) free_pmd_fast(pmd) 65
181#define pgd_free(pgd) free_pgd_fast(pgd) 66#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE)
182#define pgd_alloc(mm) get_pgd_fast() 67#define pmd_populate(MM,PMD,PTE_PAGE) \
68 pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
69
70#define check_pgt_cache() do { } while (0)
183 71
184#endif /* _SPARC64_PGALLOC_H */ 72#endif /* _SPARC64_PGALLOC_H */
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index f0a9b44d3eb5..ed4124edf837 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -25,7 +25,8 @@
25#include <asm/const.h> 25#include <asm/const.h>
26 26
27/* The kernel image occupies 0x4000000 to 0x1000000 (4MB --> 32MB). 27/* The kernel image occupies 0x4000000 to 0x1000000 (4MB --> 32MB).
28 * The page copy blockops can use 0x2000000 to 0x10000000. 28 * The page copy blockops can use 0x2000000 to 0x4000000.
29 * The TSB is mapped in the 0x4000000 to 0x6000000 range.
29 * The PROM resides in an area spanning 0xf0000000 to 0x100000000. 30 * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
30 * The vmalloc area spans 0x100000000 to 0x200000000. 31 * The vmalloc area spans 0x100000000 to 0x200000000.
31 * Since modules need to be in the lowest 32-bits of the address space, 32 * Since modules need to be in the lowest 32-bits of the address space,
@@ -34,6 +35,7 @@
34 * 0x400000000. 35 * 0x400000000.
35 */ 36 */
36#define TLBTEMP_BASE _AC(0x0000000002000000,UL) 37#define TLBTEMP_BASE _AC(0x0000000002000000,UL)
38#define TSBMAP_BASE _AC(0x0000000004000000,UL)
37#define MODULES_VADDR _AC(0x0000000010000000,UL) 39#define MODULES_VADDR _AC(0x0000000010000000,UL)
38#define MODULES_LEN _AC(0x00000000e0000000,UL) 40#define MODULES_LEN _AC(0x00000000e0000000,UL)
39#define MODULES_END _AC(0x00000000f0000000,UL) 41#define MODULES_END _AC(0x00000000f0000000,UL)
@@ -88,162 +90,538 @@
88 90
89#endif /* !(__ASSEMBLY__) */ 91#endif /* !(__ASSEMBLY__) */
90 92
91/* Spitfire/Cheetah TTE bits. */ 93/* PTE bits which are the same in SUN4U and SUN4V format. */
92#define _PAGE_VALID _AC(0x8000000000000000,UL) /* Valid TTE */ 94#define _PAGE_VALID _AC(0x8000000000000000,UL) /* Valid TTE */
93#define _PAGE_R _AC(0x8000000000000000,UL) /* Keep ref bit up to date*/ 95#define _PAGE_R _AC(0x8000000000000000,UL) /* Keep ref bit uptodate*/
94#define _PAGE_SZ4MB _AC(0x6000000000000000,UL) /* 4MB Page */ 96
95#define _PAGE_SZ512K _AC(0x4000000000000000,UL) /* 512K Page */ 97/* SUN4U pte bits... */
96#define _PAGE_SZ64K _AC(0x2000000000000000,UL) /* 64K Page */ 98#define _PAGE_SZ4MB_4U _AC(0x6000000000000000,UL) /* 4MB Page */
97#define _PAGE_SZ8K _AC(0x0000000000000000,UL) /* 8K Page */ 99#define _PAGE_SZ512K_4U _AC(0x4000000000000000,UL) /* 512K Page */
98#define _PAGE_NFO _AC(0x1000000000000000,UL) /* No Fault Only */ 100#define _PAGE_SZ64K_4U _AC(0x2000000000000000,UL) /* 64K Page */
99#define _PAGE_IE _AC(0x0800000000000000,UL) /* Invert Endianness */ 101#define _PAGE_SZ8K_4U _AC(0x0000000000000000,UL) /* 8K Page */
100#define _PAGE_SOFT2 _AC(0x07FC000000000000,UL) /* Software bits, set 2 */ 102#define _PAGE_NFO_4U _AC(0x1000000000000000,UL) /* No Fault Only */
101#define _PAGE_RES1 _AC(0x0002000000000000,UL) /* Reserved */ 103#define _PAGE_IE_4U _AC(0x0800000000000000,UL) /* Invert Endianness */
102#define _PAGE_SZ32MB _AC(0x0001000000000000,UL) /* (Panther) 32MB page */ 104#define _PAGE_SOFT2_4U _AC(0x07FC000000000000,UL) /* Software bits, set 2 */
103#define _PAGE_SZ256MB _AC(0x2001000000000000,UL) /* (Panther) 256MB page */ 105#define _PAGE_RES1_4U _AC(0x0002000000000000,UL) /* Reserved */
104#define _PAGE_SN _AC(0x0000800000000000,UL) /* (Cheetah) Snoop */ 106#define _PAGE_SZ32MB_4U _AC(0x0001000000000000,UL) /* (Panther) 32MB page */
105#define _PAGE_RES2 _AC(0x0000780000000000,UL) /* Reserved */ 107#define _PAGE_SZ256MB_4U _AC(0x2001000000000000,UL) /* (Panther) 256MB page */
106#define _PAGE_PADDR_SF _AC(0x000001FFFFFFE000,UL) /* (Spitfire) paddr[40:13]*/ 108#define _PAGE_SN_4U _AC(0x0000800000000000,UL) /* (Cheetah) Snoop */
107#define _PAGE_PADDR _AC(0x000007FFFFFFE000,UL) /* (Cheetah) paddr[42:13] */ 109#define _PAGE_RES2_4U _AC(0x0000780000000000,UL) /* Reserved */
108#define _PAGE_SOFT _AC(0x0000000000001F80,UL) /* Software bits */ 110#define _PAGE_PADDR_4U _AC(0x000007FFFFFFE000,UL) /* (Cheetah) pa[42:13] */
109#define _PAGE_L _AC(0x0000000000000040,UL) /* Locked TTE */ 111#define _PAGE_SOFT_4U _AC(0x0000000000001F80,UL) /* Software bits: */
110#define _PAGE_CP _AC(0x0000000000000020,UL) /* Cacheable in P-Cache */ 112#define _PAGE_EXEC_4U _AC(0x0000000000001000,UL) /* Executable SW bit */
111#define _PAGE_CV _AC(0x0000000000000010,UL) /* Cacheable in V-Cache */ 113#define _PAGE_MODIFIED_4U _AC(0x0000000000000800,UL) /* Modified (dirty) */
112#define _PAGE_E _AC(0x0000000000000008,UL) /* side-Effect */ 114#define _PAGE_FILE_4U _AC(0x0000000000000800,UL) /* Pagecache page */
113#define _PAGE_P _AC(0x0000000000000004,UL) /* Privileged Page */ 115#define _PAGE_ACCESSED_4U _AC(0x0000000000000400,UL) /* Accessed (ref'd) */
114#define _PAGE_W _AC(0x0000000000000002,UL) /* Writable */ 116#define _PAGE_READ_4U _AC(0x0000000000000200,UL) /* Readable SW Bit */
115#define _PAGE_G _AC(0x0000000000000001,UL) /* Global */ 117#define _PAGE_WRITE_4U _AC(0x0000000000000100,UL) /* Writable SW Bit */
116 118#define _PAGE_PRESENT_4U _AC(0x0000000000000080,UL) /* Present */
117/* Here are the SpitFire software bits we use in the TTE's. 119#define _PAGE_L_4U _AC(0x0000000000000040,UL) /* Locked TTE */
118 * 120#define _PAGE_CP_4U _AC(0x0000000000000020,UL) /* Cacheable in P-Cache */
119 * WARNING: If you are going to try and start using some 121#define _PAGE_CV_4U _AC(0x0000000000000010,UL) /* Cacheable in V-Cache */
120 * of the soft2 bits, you will need to make 122#define _PAGE_E_4U _AC(0x0000000000000008,UL) /* side-Effect */
121 * modifications to the swap entry implementation. 123#define _PAGE_P_4U _AC(0x0000000000000004,UL) /* Privileged Page */
122 * For example, one thing that could happen is that 124#define _PAGE_W_4U _AC(0x0000000000000002,UL) /* Writable */
123 * swp_entry_to_pte() would BUG_ON() if you tried 125
124 * to use one of the soft2 bits for _PAGE_FILE. 126/* SUN4V pte bits... */
125 * 127#define _PAGE_NFO_4V _AC(0x4000000000000000,UL) /* No Fault Only */
126 * Like other architectures, I have aliased _PAGE_FILE with 128#define _PAGE_SOFT2_4V _AC(0x3F00000000000000,UL) /* Software bits, set 2 */
127 * _PAGE_MODIFIED. This works because _PAGE_FILE is never 129#define _PAGE_MODIFIED_4V _AC(0x2000000000000000,UL) /* Modified (dirty) */
128 * interpreted that way unless _PAGE_PRESENT is clear. 130#define _PAGE_ACCESSED_4V _AC(0x1000000000000000,UL) /* Accessed (ref'd) */
129 */ 131#define _PAGE_READ_4V _AC(0x0800000000000000,UL) /* Readable SW Bit */
130#define _PAGE_EXEC _AC(0x0000000000001000,UL) /* Executable SW bit */ 132#define _PAGE_WRITE_4V _AC(0x0400000000000000,UL) /* Writable SW Bit */
131#define _PAGE_MODIFIED _AC(0x0000000000000800,UL) /* Modified (dirty) */ 133#define _PAGE_PADDR_4V _AC(0x00FFFFFFFFFFE000,UL) /* paddr[55:13] */
132#define _PAGE_FILE _AC(0x0000000000000800,UL) /* Pagecache page */ 134#define _PAGE_IE_4V _AC(0x0000000000001000,UL) /* Invert Endianness */
133#define _PAGE_ACCESSED _AC(0x0000000000000400,UL) /* Accessed (ref'd) */ 135#define _PAGE_E_4V _AC(0x0000000000000800,UL) /* side-Effect */
134#define _PAGE_READ _AC(0x0000000000000200,UL) /* Readable SW Bit */ 136#define _PAGE_CP_4V _AC(0x0000000000000400,UL) /* Cacheable in P-Cache */
135#define _PAGE_WRITE _AC(0x0000000000000100,UL) /* Writable SW Bit */ 137#define _PAGE_CV_4V _AC(0x0000000000000200,UL) /* Cacheable in V-Cache */
136#define _PAGE_PRESENT _AC(0x0000000000000080,UL) /* Present */ 138#define _PAGE_P_4V _AC(0x0000000000000100,UL) /* Privileged Page */
139#define _PAGE_EXEC_4V _AC(0x0000000000000080,UL) /* Executable Page */
140#define _PAGE_W_4V _AC(0x0000000000000040,UL) /* Writable */
141#define _PAGE_SOFT_4V _AC(0x0000000000000030,UL) /* Software bits */
142#define _PAGE_FILE_4V _AC(0x0000000000000020,UL) /* Pagecache page */
143#define _PAGE_PRESENT_4V _AC(0x0000000000000010,UL) /* Present */
144#define _PAGE_RESV_4V _AC(0x0000000000000008,UL) /* Reserved */
145#define _PAGE_SZ16GB_4V _AC(0x0000000000000007,UL) /* 16GB Page */
146#define _PAGE_SZ2GB_4V _AC(0x0000000000000006,UL) /* 2GB Page */
147#define _PAGE_SZ256MB_4V _AC(0x0000000000000005,UL) /* 256MB Page */
148#define _PAGE_SZ32MB_4V _AC(0x0000000000000004,UL) /* 32MB Page */
149#define _PAGE_SZ4MB_4V _AC(0x0000000000000003,UL) /* 4MB Page */
150#define _PAGE_SZ512K_4V _AC(0x0000000000000002,UL) /* 512K Page */
151#define _PAGE_SZ64K_4V _AC(0x0000000000000001,UL) /* 64K Page */
152#define _PAGE_SZ8K_4V _AC(0x0000000000000000,UL) /* 8K Page */
137 153
138#if PAGE_SHIFT == 13 154#if PAGE_SHIFT == 13
139#define _PAGE_SZBITS _PAGE_SZ8K 155#define _PAGE_SZBITS_4U _PAGE_SZ8K_4U
156#define _PAGE_SZBITS_4V _PAGE_SZ8K_4V
140#elif PAGE_SHIFT == 16 157#elif PAGE_SHIFT == 16
141#define _PAGE_SZBITS _PAGE_SZ64K 158#define _PAGE_SZBITS_4U _PAGE_SZ64K_4U
159#define _PAGE_SZBITS_4V _PAGE_SZ64K_4V
142#elif PAGE_SHIFT == 19 160#elif PAGE_SHIFT == 19
143#define _PAGE_SZBITS _PAGE_SZ512K 161#define _PAGE_SZBITS_4U _PAGE_SZ512K_4U
162#define _PAGE_SZBITS_4V _PAGE_SZ512K_4V
144#elif PAGE_SHIFT == 22 163#elif PAGE_SHIFT == 22
145#define _PAGE_SZBITS _PAGE_SZ4MB 164#define _PAGE_SZBITS_4U _PAGE_SZ4MB_4U
165#define _PAGE_SZBITS_4V _PAGE_SZ4MB_4V
146#else 166#else
147#error Wrong PAGE_SHIFT specified 167#error Wrong PAGE_SHIFT specified
148#endif 168#endif
149 169
150#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB) 170#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
151#define _PAGE_SZHUGE _PAGE_SZ4MB 171#define _PAGE_SZHUGE_4U _PAGE_SZ4MB_4U
172#define _PAGE_SZHUGE_4V _PAGE_SZ4MB_4V
152#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K) 173#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
153#define _PAGE_SZHUGE _PAGE_SZ512K 174#define _PAGE_SZHUGE_4U _PAGE_SZ512K_4U
175#define _PAGE_SZHUGE_4V _PAGE_SZ512K_4V
154#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K) 176#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
155#define _PAGE_SZHUGE _PAGE_SZ64K 177#define _PAGE_SZHUGE_4U _PAGE_SZ64K_4U
178#define _PAGE_SZHUGE_4V _PAGE_SZ64K_4V
156#endif 179#endif
157 180
158#define _PAGE_CACHE (_PAGE_CP | _PAGE_CV) 181/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
182#define __P000 __pgprot(0)
183#define __P001 __pgprot(0)
184#define __P010 __pgprot(0)
185#define __P011 __pgprot(0)
186#define __P100 __pgprot(0)
187#define __P101 __pgprot(0)
188#define __P110 __pgprot(0)
189#define __P111 __pgprot(0)
190
191#define __S000 __pgprot(0)
192#define __S001 __pgprot(0)
193#define __S010 __pgprot(0)
194#define __S011 __pgprot(0)
195#define __S100 __pgprot(0)
196#define __S101 __pgprot(0)
197#define __S110 __pgprot(0)
198#define __S111 __pgprot(0)
159 199
160#define __DIRTY_BITS (_PAGE_MODIFIED | _PAGE_WRITE | _PAGE_W) 200#ifndef __ASSEMBLY__
161#define __ACCESS_BITS (_PAGE_ACCESSED | _PAGE_READ | _PAGE_R)
162#define __PRIV_BITS _PAGE_P
163 201
164#define PAGE_NONE __pgprot (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_CACHE) 202extern pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long);
165 203
166/* Don't set the TTE _PAGE_W bit here, else the dirty bit never gets set. */ 204extern unsigned long pte_sz_bits(unsigned long size);
167#define PAGE_SHARED __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
168 __ACCESS_BITS | _PAGE_WRITE | _PAGE_EXEC)
169 205
170#define PAGE_COPY __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \ 206extern pgprot_t PAGE_KERNEL;
171 __ACCESS_BITS | _PAGE_EXEC) 207extern pgprot_t PAGE_KERNEL_LOCKED;
208extern pgprot_t PAGE_COPY;
209extern pgprot_t PAGE_SHARED;
172 210
173#define PAGE_READONLY __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \ 211/* XXX This ugliness is for the atyfb driver's sparc mmap() support. XXX */
174 __ACCESS_BITS | _PAGE_EXEC) 212extern unsigned long _PAGE_IE;
213extern unsigned long _PAGE_E;
214extern unsigned long _PAGE_CACHE;
175 215
176#define PAGE_KERNEL __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \ 216extern unsigned long pg_iobits;
177 __PRIV_BITS | \ 217extern unsigned long _PAGE_ALL_SZ_BITS;
178 __ACCESS_BITS | __DIRTY_BITS | _PAGE_EXEC) 218extern unsigned long _PAGE_SZBITS;
179 219
180#define PAGE_SHARED_NOEXEC __pgprot (_PAGE_PRESENT | _PAGE_VALID | \ 220extern struct page *mem_map_zero;
181 _PAGE_CACHE | \ 221#define ZERO_PAGE(vaddr) (mem_map_zero)
182 __ACCESS_BITS | _PAGE_WRITE)
183 222
184#define PAGE_COPY_NOEXEC __pgprot (_PAGE_PRESENT | _PAGE_VALID | \ 223/* PFNs are real physical page numbers. However, mem_map only begins to record
185 _PAGE_CACHE | __ACCESS_BITS) 224 * per-page information starting at pfn_base. This is to handle systems where
225 * the first physical page in the machine is at some huge physical address,
226 * such as 4GB. This is common on a partitioned E10000, for example.
227 */
228static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
229{
230 unsigned long paddr = pfn << PAGE_SHIFT;
231 unsigned long sz_bits;
232
233 sz_bits = 0UL;
234 if (_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL) {
235 __asm__ __volatile__(
236 "\n661: sethi %uhi(%1), %0\n"
237 " sllx %0, 32, %0\n"
238 " .section .sun4v_2insn_patch, \"ax\"\n"
239 " .word 661b\n"
240 " mov %2, %0\n"
241 " nop\n"
242 " .previous\n"
243 : "=r" (sz_bits)
244 : "i" (_PAGE_SZBITS_4U), "i" (_PAGE_SZBITS_4V));
245 }
246 return __pte(paddr | sz_bits | pgprot_val(prot));
247}
248#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
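The comment above notes that mem_map only records per-page information from pfn_base onward; a minimal sketch of what that implies for pfn-to-page conversion (the real macro lives elsewhere in the tree, so the helper name and exact form are assumptions):

static inline struct page *pfn_to_page_sketch(unsigned long pfn)
{
	/* struct page entries only exist for pfns at or above pfn_base */
	return mem_map + (pfn - pfn_base);
}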
186 249
187#define PAGE_READONLY_NOEXEC __pgprot (_PAGE_PRESENT | _PAGE_VALID | \ 250/* This one can be done with two shifts. */
188 _PAGE_CACHE | __ACCESS_BITS) 251static inline unsigned long pte_pfn(pte_t pte)
252{
253 unsigned long ret;
254
255 __asm__ __volatile__(
256 "\n661: sllx %1, %2, %0\n"
257 " srlx %0, %3, %0\n"
258 " .section .sun4v_2insn_patch, \"ax\"\n"
259 " .word 661b\n"
260 " sllx %1, %4, %0\n"
261 " srlx %0, %5, %0\n"
262 " .previous\n"
263 : "=r" (ret)
264 : "r" (pte_val(pte)),
265 "i" (21), "i" (21 + PAGE_SHIFT),
266 "i" (8), "i" (8 + PAGE_SHIFT));
267
268 return ret;
269}
270#define pte_page(x) pfn_to_page(pte_pfn(x))
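A sketch of what the two-shift trick computes, with the bit positions inferred from the shift counts used above (a 42:13 physical-address field on sun4u, 55:13 on sun4v) and PAGE_SHIFT taken as 13 for the 8K base page:

static inline unsigned long sun4u_pte_pfn_sketch(unsigned long tte)
{
	/* shift out the status bits above the PA field, then the page offset */
	return (tte << 21) >> (21 + 13);
}

static inline unsigned long sun4v_pte_pfn_sketch(unsigned long tte)
{
	return (tte << 8) >> (8 + 13);
}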
189 271
190#define _PFN_MASK _PAGE_PADDR 272static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
273{
274 unsigned long mask, tmp;
275
276 /* SUN4U: 0x600307ffffffecb8 (negated == 0x9ffcf80000001347)
277 * SUN4V: 0x30ffffffffffee17 (negated == 0xcf000000000011e8)
278 *
279 * Even if we use negation tricks the result is still a 6
280 * instruction sequence, so don't try to play fancy and just
281 * do the most straightforward implementation.
282 *
283 * Note: We encode this into 3 sun4v 2-insn patch sequences.
284 */
191 285
192#define pg_iobits (_PAGE_VALID | _PAGE_PRESENT | __DIRTY_BITS | \ 286 __asm__ __volatile__(
193 __ACCESS_BITS | _PAGE_E) 287 "\n661: sethi %%uhi(%2), %1\n"
288 " sethi %%hi(%2), %0\n"
289 "\n662: or %1, %%ulo(%2), %1\n"
290 " or %0, %%lo(%2), %0\n"
291 "\n663: sllx %1, 32, %1\n"
292 " or %0, %1, %0\n"
293 " .section .sun4v_2insn_patch, \"ax\"\n"
294 " .word 661b\n"
295 " sethi %%uhi(%3), %1\n"
296 " sethi %%hi(%3), %0\n"
297 " .word 662b\n"
298 " or %1, %%ulo(%3), %1\n"
299 " or %0, %%lo(%3), %0\n"
300 " .word 663b\n"
301 " sllx %1, 32, %1\n"
302 " or %0, %1, %0\n"
303 " .previous\n"
304 : "=r" (mask), "=r" (tmp)
305 : "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
306 _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | _PAGE_PRESENT_4U |
307 _PAGE_SZBITS_4U),
308 "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
309 _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | _PAGE_PRESENT_4V |
310 _PAGE_SZBITS_4V));
311
312 return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
313}
194 314
195#define __P000 PAGE_NONE 315static inline pte_t pgoff_to_pte(unsigned long off)
196#define __P001 PAGE_READONLY_NOEXEC 316{
197#define __P010 PAGE_COPY_NOEXEC 317 off <<= PAGE_SHIFT;
198#define __P011 PAGE_COPY_NOEXEC 318
199#define __P100 PAGE_READONLY 319 __asm__ __volatile__(
200#define __P101 PAGE_READONLY 320 "\n661: or %0, %2, %0\n"
201#define __P110 PAGE_COPY 321 " .section .sun4v_1insn_patch, \"ax\"\n"
202#define __P111 PAGE_COPY 322 " .word 661b\n"
323 " or %0, %3, %0\n"
324 " .previous\n"
325 : "=r" (off)
326 : "0" (off), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V));
327
328 return __pte(off);
329}
203 330
204#define __S000 PAGE_NONE 331static inline pgprot_t pgprot_noncached(pgprot_t prot)
205#define __S001 PAGE_READONLY_NOEXEC 332{
206#define __S010 PAGE_SHARED_NOEXEC 333 unsigned long val = pgprot_val(prot);
207#define __S011 PAGE_SHARED_NOEXEC 334
208#define __S100 PAGE_READONLY 335 __asm__ __volatile__(
209#define __S101 PAGE_READONLY 336 "\n661: andn %0, %2, %0\n"
210#define __S110 PAGE_SHARED 337 " or %0, %3, %0\n"
211#define __S111 PAGE_SHARED 338 " .section .sun4v_2insn_patch, \"ax\"\n"
339 " .word 661b\n"
340 " andn %0, %4, %0\n"
341 " or %0, %3, %0\n"
342 " .previous\n"
343 : "=r" (val)
344 : "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U),
345 "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V));
346
347 return __pgprot(val);
348}
349/* Various pieces of code check for platform support by ifdef testing
350 * on "pgprot_noncached". That's broken and should be fixed, but for
351 * now...
352 */
353#define pgprot_noncached pgprot_noncached
212 354
213#ifndef __ASSEMBLY__ 355#ifdef CONFIG_HUGETLB_PAGE
356static inline pte_t pte_mkhuge(pte_t pte)
357{
358 unsigned long mask;
359
360 __asm__ __volatile__(
361 "\n661: sethi %%uhi(%1), %0\n"
362 " sllx %0, 32, %0\n"
363 " .section .sun4v_2insn_patch, \"ax\"\n"
364 " .word 661b\n"
365 " mov %2, %0\n"
366 " nop\n"
367 " .previous\n"
368 : "=r" (mask)
369 : "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V));
370
371 return __pte(pte_val(pte) | mask);
372}
373#endif
214 374
215extern unsigned long phys_base; 375static inline pte_t pte_mkdirty(pte_t pte)
216extern unsigned long pfn_base; 376{
377 unsigned long val = pte_val(pte), tmp;
378
379 __asm__ __volatile__(
380 "\n661: or %0, %3, %0\n"
381 " nop\n"
382 "\n662: nop\n"
383 " nop\n"
384 " .section .sun4v_2insn_patch, \"ax\"\n"
385 " .word 661b\n"
386 " sethi %%uhi(%4), %1\n"
387 " sllx %1, 32, %1\n"
388 " .word 662b\n"
389 " or %1, %%lo(%4), %1\n"
390 " or %0, %1, %0\n"
391 " .previous\n"
392 : "=r" (val), "=r" (tmp)
393 : "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
394 "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));
395
396 return __pte(val);
397}
217 398
218extern struct page *mem_map_zero; 399static inline pte_t pte_mkclean(pte_t pte)
219#define ZERO_PAGE(vaddr) (mem_map_zero) 400{
401 unsigned long val = pte_val(pte), tmp;
402
403 __asm__ __volatile__(
404 "\n661: andn %0, %3, %0\n"
405 " nop\n"
406 "\n662: nop\n"
407 " nop\n"
408 " .section .sun4v_2insn_patch, \"ax\"\n"
409 " .word 661b\n"
410 " sethi %%uhi(%4), %1\n"
411 " sllx %1, 32, %1\n"
412 " .word 662b\n"
413 " or %1, %%lo(%4), %1\n"
414 " andn %0, %1, %0\n"
415 " .previous\n"
416 : "=r" (val), "=r" (tmp)
417 : "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
418 "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));
419
420 return __pte(val);
421}
220 422
221/* PFNs are real physical page numbers. However, mem_map only begins to record 423static inline pte_t pte_mkwrite(pte_t pte)
222 * per-page information starting at pfn_base. This is to handle systems where 424{
223 * the first physical page in the machine is at some huge physical address, 425 unsigned long val = pte_val(pte), mask;
224 * such as 4GB. This is common on a partitioned E10000, for example. 426
225 */ 427 __asm__ __volatile__(
428 "\n661: mov %1, %0\n"
429 " nop\n"
430 " .section .sun4v_2insn_patch, \"ax\"\n"
431 " .word 661b\n"
432 " sethi %%uhi(%2), %0\n"
433 " sllx %0, 32, %0\n"
434 " .previous\n"
435 : "=r" (mask)
436 : "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));
437
438 return __pte(val | mask);
439}
226 440
227#define pfn_pte(pfn, prot) \ 441static inline pte_t pte_wrprotect(pte_t pte)
228 __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot) | _PAGE_SZBITS) 442{
229#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) 443 unsigned long val = pte_val(pte), tmp;
444
445 __asm__ __volatile__(
446 "\n661: andn %0, %3, %0\n"
447 " nop\n"
448 "\n662: nop\n"
449 " nop\n"
450 " .section .sun4v_2insn_patch, \"ax\"\n"
451 " .word 661b\n"
452 " sethi %%uhi(%4), %1\n"
453 " sllx %1, 32, %1\n"
454 " .word 662b\n"
455 " or %1, %%lo(%4), %1\n"
456 " andn %0, %1, %0\n"
457 " .previous\n"
458 : "=r" (val), "=r" (tmp)
459 : "0" (val), "i" (_PAGE_WRITE_4U | _PAGE_W_4U),
460 "i" (_PAGE_WRITE_4V | _PAGE_W_4V));
461
462 return __pte(val);
463}
464
465static inline pte_t pte_mkold(pte_t pte)
466{
467 unsigned long mask;
468
469 __asm__ __volatile__(
470 "\n661: mov %1, %0\n"
471 " nop\n"
472 " .section .sun4v_2insn_patch, \"ax\"\n"
473 " .word 661b\n"
474 " sethi %%uhi(%2), %0\n"
475 " sllx %0, 32, %0\n"
476 " .previous\n"
477 : "=r" (mask)
478 : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));
479
480 mask |= _PAGE_R;
481
482 return __pte(pte_val(pte) & ~mask);
483}
484
485static inline pte_t pte_mkyoung(pte_t pte)
486{
487 unsigned long mask;
488
489 __asm__ __volatile__(
490 "\n661: mov %1, %0\n"
491 " nop\n"
492 " .section .sun4v_2insn_patch, \"ax\"\n"
493 " .word 661b\n"
494 " sethi %%uhi(%2), %0\n"
495 " sllx %0, 32, %0\n"
496 " .previous\n"
497 : "=r" (mask)
498 : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));
499
500 mask |= _PAGE_R;
501
502 return __pte(pte_val(pte) | mask);
503}
230 504
231#define pte_pfn(x) ((pte_val(x) & _PAGE_PADDR)>>PAGE_SHIFT) 505static inline unsigned long pte_young(pte_t pte)
232#define pte_page(x) pfn_to_page(pte_pfn(x)) 506{
507 unsigned long mask;
508
509 __asm__ __volatile__(
510 "\n661: mov %1, %0\n"
511 " nop\n"
512 " .section .sun4v_2insn_patch, \"ax\"\n"
513 " .word 661b\n"
514 " sethi %%uhi(%2), %0\n"
515 " sllx %0, 32, %0\n"
516 " .previous\n"
517 : "=r" (mask)
518 : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));
519
520 return (pte_val(pte) & mask);
521}
522
523static inline unsigned long pte_dirty(pte_t pte)
524{
525 unsigned long mask;
526
527 __asm__ __volatile__(
528 "\n661: mov %1, %0\n"
529 " nop\n"
530 " .section .sun4v_2insn_patch, \"ax\"\n"
531 " .word 661b\n"
532 " sethi %%uhi(%2), %0\n"
533 " sllx %0, 32, %0\n"
534 " .previous\n"
535 : "=r" (mask)
536 : "i" (_PAGE_MODIFIED_4U), "i" (_PAGE_MODIFIED_4V));
537
538 return (pte_val(pte) & mask);
539}
233 540
234static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot) 541static inline unsigned long pte_write(pte_t pte)
235{ 542{
236 pte_t __pte; 543 unsigned long mask;
237 const unsigned long preserve_mask = (_PFN_MASK | 544
238 _PAGE_MODIFIED | _PAGE_ACCESSED | 545 __asm__ __volatile__(
239 _PAGE_CACHE | _PAGE_E | 546 "\n661: mov %1, %0\n"
240 _PAGE_PRESENT | _PAGE_SZBITS); 547 " nop\n"
548 " .section .sun4v_2insn_patch, \"ax\"\n"
549 " .word 661b\n"
550 " sethi %%uhi(%2), %0\n"
551 " sllx %0, 32, %0\n"
552 " .previous\n"
553 : "=r" (mask)
554 : "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));
555
556 return (pte_val(pte) & mask);
557}
241 558
242 pte_val(__pte) = (pte_val(orig_pte) & preserve_mask) | 559static inline unsigned long pte_exec(pte_t pte)
243 (pgprot_val(new_prot) & ~preserve_mask); 560{
561 unsigned long mask;
562
563 __asm__ __volatile__(
564 "\n661: sethi %%hi(%1), %0\n"
565 " .section .sun4v_1insn_patch, \"ax\"\n"
566 " .word 661b\n"
567 " mov %2, %0\n"
568 " .previous\n"
569 : "=r" (mask)
570 : "i" (_PAGE_EXEC_4U), "i" (_PAGE_EXEC_4V));
571
572 return (pte_val(pte) & mask);
573}
244 574
245 return __pte; 575static inline unsigned long pte_read(pte_t pte)
576{
577 unsigned long mask;
578
579 __asm__ __volatile__(
580 "\n661: mov %1, %0\n"
581 " nop\n"
582 " .section .sun4v_2insn_patch, \"ax\"\n"
583 " .word 661b\n"
584 " sethi %%uhi(%2), %0\n"
585 " sllx %0, 32, %0\n"
586 " .previous\n"
587 : "=r" (mask)
588 : "i" (_PAGE_READ_4U), "i" (_PAGE_READ_4V));
589
590 return (pte_val(pte) & mask);
246} 591}
592
593static inline unsigned long pte_file(pte_t pte)
594{
595 unsigned long val = pte_val(pte);
596
597 __asm__ __volatile__(
598 "\n661: and %0, %2, %0\n"
599 " .section .sun4v_1insn_patch, \"ax\"\n"
600 " .word 661b\n"
601 " and %0, %3, %0\n"
602 " .previous\n"
603 : "=r" (val)
604 : "0" (val), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V));
605
606 return val;
607}
608
609static inline unsigned long pte_present(pte_t pte)
610{
611 unsigned long val = pte_val(pte);
612
613 __asm__ __volatile__(
614 "\n661: and %0, %2, %0\n"
615 " .section .sun4v_1insn_patch, \"ax\"\n"
616 " .word 661b\n"
617 " and %0, %3, %0\n"
618 " .previous\n"
619 : "=r" (val)
620 : "0" (val), "i" (_PAGE_PRESENT_4U), "i" (_PAGE_PRESENT_4V));
621
622 return val;
623}
624
247#define pmd_set(pmdp, ptep) \ 625#define pmd_set(pmdp, ptep) \
248 (pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL)) 626 (pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL))
249#define pud_set(pudp, pmdp) \ 627#define pud_set(pudp, pmdp) \
@@ -253,8 +631,6 @@ static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
253#define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd)) 631#define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd))
254#define pud_page(pud) \ 632#define pud_page(pud) \
255 ((unsigned long) __va((((unsigned long)pud_val(pud))<<11UL))) 633 ((unsigned long) __va((((unsigned long)pud_val(pud))<<11UL)))
256#define pte_none(pte) (!pte_val(pte))
257#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
258#define pmd_none(pmd) (!pmd_val(pmd)) 634#define pmd_none(pmd) (!pmd_val(pmd))
259#define pmd_bad(pmd) (0) 635#define pmd_bad(pmd) (0)
260#define pmd_present(pmd) (pmd_val(pmd) != 0U) 636#define pmd_present(pmd) (pmd_val(pmd) != 0U)
@@ -264,30 +640,8 @@ static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
264#define pud_present(pud) (pud_val(pud) != 0U) 640#define pud_present(pud) (pud_val(pud) != 0U)
265#define pud_clear(pudp) (pud_val(*(pudp)) = 0U) 641#define pud_clear(pudp) (pud_val(*(pudp)) = 0U)
266 642
267/* The following only work if pte_present() is true. 643/* Same in both SUN4V and SUN4U. */
268 * Undefined behaviour if not.. 644#define pte_none(pte) (!pte_val(pte))
269 */
270#define pte_read(pte) (pte_val(pte) & _PAGE_READ)
271#define pte_exec(pte) (pte_val(pte) & _PAGE_EXEC)
272#define pte_write(pte) (pte_val(pte) & _PAGE_WRITE)
273#define pte_dirty(pte) (pte_val(pte) & _PAGE_MODIFIED)
274#define pte_young(pte) (pte_val(pte) & _PAGE_ACCESSED)
275#define pte_wrprotect(pte) (__pte(pte_val(pte) & ~(_PAGE_WRITE|_PAGE_W)))
276#define pte_rdprotect(pte) \
277 (__pte(((pte_val(pte)<<1UL)>>1UL) & ~_PAGE_READ))
278#define pte_mkclean(pte) \
279 (__pte(pte_val(pte) & ~(_PAGE_MODIFIED|_PAGE_W)))
280#define pte_mkold(pte) \
281 (__pte(((pte_val(pte)<<1UL)>>1UL) & ~_PAGE_ACCESSED))
282
283/* Permanent address of a page. */
284#define __page_address(page) page_address(page)
285
286/* Be very careful when you change these three, they are delicate. */
287#define pte_mkyoung(pte) (__pte(pte_val(pte) | _PAGE_ACCESSED | _PAGE_R))
288#define pte_mkwrite(pte) (__pte(pte_val(pte) | _PAGE_WRITE))
289#define pte_mkdirty(pte) (__pte(pte_val(pte) | _PAGE_MODIFIED | _PAGE_W))
290#define pte_mkhuge(pte) (__pte(pte_val(pte) | _PAGE_SZHUGE))
291 645
292/* to find an entry in a page-table-directory. */ 646/* to find an entry in a page-table-directory. */
293#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) 647#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
@@ -296,11 +650,6 @@ static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
296/* to find an entry in a kernel page-table-directory */ 650/* to find an entry in a kernel page-table-directory */
297#define pgd_offset_k(address) pgd_offset(&init_mm, address) 651#define pgd_offset_k(address) pgd_offset(&init_mm, address)
298 652
299/* extract the pgd cache used for optimizing the tlb miss
300 * slow path when executing 32-bit compat processes
301 */
302#define get_pgd_cache(pgd) ((unsigned long) pgd_val(*pgd) << 11)
303
304/* Find an entry in the second-level page table.. */ 653/* Find an entry in the second-level page table.. */
305#define pmd_offset(pudp, address) \ 654#define pmd_offset(pudp, address) \
306 ((pmd_t *) pud_page(*(pudp)) + \ 655 ((pmd_t *) pud_page(*(pudp)) + \
@@ -327,6 +676,9 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *p
327 676
328 /* It is more efficient to let flush_tlb_kernel_range() 677 /* It is more efficient to let flush_tlb_kernel_range()
329 * handle init_mm tlb flushes. 678 * handle init_mm tlb flushes.
679 *
680 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
681 * and SUN4V pte layout, so this inline test is fine.
330 */ 682 */
331 if (likely(mm != &init_mm) && (pte_val(orig) & _PAGE_VALID)) 683 if (likely(mm != &init_mm) && (pte_val(orig) & _PAGE_VALID))
332 tlb_batch_add(mm, addr, ptep, orig); 684 tlb_batch_add(mm, addr, ptep, orig);
@@ -361,42 +713,23 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
361#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) 713#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
362 714
363/* File offset in PTE support. */ 715/* File offset in PTE support. */
364#define pte_file(pte) (pte_val(pte) & _PAGE_FILE) 716extern unsigned long pte_file(pte_t);
365#define pte_to_pgoff(pte) (pte_val(pte) >> PAGE_SHIFT) 717#define pte_to_pgoff(pte) (pte_val(pte) >> PAGE_SHIFT)
366#define pgoff_to_pte(off) (__pte(((off) << PAGE_SHIFT) | _PAGE_FILE)) 718extern pte_t pgoff_to_pte(unsigned long);
367#define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL) 719#define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL)
368 720
369extern unsigned long prom_virt_to_phys(unsigned long, int *); 721extern unsigned long prom_virt_to_phys(unsigned long, int *);
370 722
371static __inline__ unsigned long 723extern unsigned long sun4u_get_pte(unsigned long);
372sun4u_get_pte (unsigned long addr)
373{
374 pgd_t *pgdp;
375 pud_t *pudp;
376 pmd_t *pmdp;
377 pte_t *ptep;
378
379 if (addr >= PAGE_OFFSET)
380 return addr & _PAGE_PADDR;
381 if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS))
382 return prom_virt_to_phys(addr, NULL);
383 pgdp = pgd_offset_k(addr);
384 pudp = pud_offset(pgdp, addr);
385 pmdp = pmd_offset(pudp, addr);
386 ptep = pte_offset_kernel(pmdp, addr);
387 return pte_val(*ptep) & _PAGE_PADDR;
388}
389 724
390static __inline__ unsigned long 725static inline unsigned long __get_phys(unsigned long addr)
391__get_phys (unsigned long addr)
392{ 726{
393 return sun4u_get_pte (addr); 727 return sun4u_get_pte(addr);
394} 728}
395 729
396static __inline__ int 730static inline int __get_iospace(unsigned long addr)
397__get_iospace (unsigned long addr)
398{ 731{
399 return ((sun4u_get_pte (addr) & 0xf0000000) >> 28); 732 return ((sun4u_get_pte(addr) & 0xf0000000) >> 28);
400} 733}
401 734
402extern unsigned long *sparc64_valid_addr_bitmap; 735extern unsigned long *sparc64_valid_addr_bitmap;
@@ -409,11 +742,6 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
409 unsigned long pfn, 742 unsigned long pfn,
410 unsigned long size, pgprot_t prot); 743 unsigned long size, pgprot_t prot);
411 744
412/* Clear virtual and physical cachability, set side-effect bit. */
413#define pgprot_noncached(prot) \
414 (__pgprot((pgprot_val(prot) & ~(_PAGE_CP | _PAGE_CV)) | \
415 _PAGE_E))
416
417/* 745/*
418 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in 746 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
419 * its high 4 bits. These macros/functions put it there or get it from there. 747 * its high 4 bits. These macros/functions put it there or get it from there.
@@ -424,8 +752,11 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
424 752
425#include <asm-generic/pgtable.h> 753#include <asm-generic/pgtable.h>
426 754
427/* We provide our own get_unmapped_area to cope with VA holes for userland */ 755/* We provide our own get_unmapped_area to cope with VA holes and
756 * SHM area cache aliasing for userland.
757 */
428#define HAVE_ARCH_UNMAPPED_AREA 758#define HAVE_ARCH_UNMAPPED_AREA
759#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
429 760
430/* We provide a special get_unmapped_area for framebuffer mmaps to try and use 761/* We provide a special get_unmapped_area for framebuffer mmaps to try and use
431 * the largest alignment possible such that larger PTEs can be used. 762 * the largest alignment possible such that larger PTEs can be used.
@@ -435,12 +766,9 @@ extern unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
435 unsigned long); 766 unsigned long);
436#define HAVE_ARCH_FB_UNMAPPED_AREA 767#define HAVE_ARCH_FB_UNMAPPED_AREA
437 768
438/* 769extern void pgtable_cache_init(void);
439 * No page table caches to initialise 770extern void sun4v_register_fault_status(void);
440 */ 771extern void sun4v_ktsb_register(void);
441#define pgtable_cache_init() do { } while (0)
442
443extern void check_pgt_cache(void);
444 772
445#endif /* !(__ASSEMBLY__) */ 773#endif /* !(__ASSEMBLY__) */
446 774
diff --git a/include/asm-sparc64/pil.h b/include/asm-sparc64/pil.h
index 8f87750c3517..79f827eb3f5d 100644
--- a/include/asm-sparc64/pil.h
+++ b/include/asm-sparc64/pil.h
@@ -16,11 +16,13 @@
16#define PIL_SMP_CALL_FUNC 1 16#define PIL_SMP_CALL_FUNC 1
17#define PIL_SMP_RECEIVE_SIGNAL 2 17#define PIL_SMP_RECEIVE_SIGNAL 2
18#define PIL_SMP_CAPTURE 3 18#define PIL_SMP_CAPTURE 3
19#define PIL_SMP_CTX_NEW_VERSION 4
19 20
20#ifndef __ASSEMBLY__ 21#ifndef __ASSEMBLY__
21#define PIL_RESERVED(PIL) ((PIL) == PIL_SMP_CALL_FUNC || \ 22#define PIL_RESERVED(PIL) ((PIL) == PIL_SMP_CALL_FUNC || \
22 (PIL) == PIL_SMP_RECEIVE_SIGNAL || \ 23 (PIL) == PIL_SMP_RECEIVE_SIGNAL || \
23 (PIL) == PIL_SMP_CAPTURE) 24 (PIL) == PIL_SMP_CAPTURE || \
25 (PIL) == PIL_SMP_CTX_NEW_VERSION)
24#endif 26#endif
25 27
26#endif /* !(_SPARC64_PIL_H) */ 28#endif /* !(_SPARC64_PIL_H) */
diff --git a/include/asm-sparc64/processor.h b/include/asm-sparc64/processor.h
index cd8d9b4c8658..c6896b88283e 100644
--- a/include/asm-sparc64/processor.h
+++ b/include/asm-sparc64/processor.h
@@ -28,6 +28,8 @@
28 * User lives in his very own context, and cannot reference us. Note 28 * User lives in his very own context, and cannot reference us. Note
29 * that TASK_SIZE is a misnomer, it really gives maximum user virtual 29 * that TASK_SIZE is a misnomer, it really gives maximum user virtual
30 * address that the kernel will allocate out. 30 * address that the kernel will allocate out.
31 *
32 * XXX No longer using virtual page tables, kill this upper limit...
31 */ 33 */
32#define VA_BITS 44 34#define VA_BITS 44
33#ifndef __ASSEMBLY__ 35#ifndef __ASSEMBLY__
@@ -37,18 +39,6 @@
37#endif 39#endif
38#define TASK_SIZE ((unsigned long)-VPTE_SIZE) 40#define TASK_SIZE ((unsigned long)-VPTE_SIZE)
39 41
40/*
41 * The vpte base must be able to hold the entire vpte, half
42 * of which lives above, and half below, the base. And it
43 * is placed as close to the highest address range as possible.
44 */
45#define VPTE_BASE_SPITFIRE (-(VPTE_SIZE/2))
46#if 1
47#define VPTE_BASE_CHEETAH VPTE_BASE_SPITFIRE
48#else
49#define VPTE_BASE_CHEETAH 0xffe0000000000000
50#endif
51
52#ifndef __ASSEMBLY__ 42#ifndef __ASSEMBLY__
53 43
54typedef struct { 44typedef struct {
@@ -101,7 +91,8 @@ extern unsigned long thread_saved_pc(struct task_struct *);
101/* Do necessary setup to start up a newly executed thread. */ 91/* Do necessary setup to start up a newly executed thread. */
102#define start_thread(regs, pc, sp) \ 92#define start_thread(regs, pc, sp) \
103do { \ 93do { \
104 regs->tstate = (regs->tstate & (TSTATE_CWP)) | (TSTATE_INITIAL_MM|TSTATE_IE) | (ASI_PNF << 24); \ 94 unsigned long __asi = ASI_PNF; \
95 regs->tstate = (regs->tstate & (TSTATE_CWP)) | (TSTATE_INITIAL_MM|TSTATE_IE) | (__asi << 24UL); \
105 regs->tpc = ((pc & (~3)) - 4); \ 96 regs->tpc = ((pc & (~3)) - 4); \
106 regs->tnpc = regs->tpc + 4; \ 97 regs->tnpc = regs->tpc + 4; \
107 regs->y = 0; \ 98 regs->y = 0; \
@@ -138,10 +129,10 @@ do { \
138 129
139#define start_thread32(regs, pc, sp) \ 130#define start_thread32(regs, pc, sp) \
140do { \ 131do { \
132 unsigned long __asi = ASI_PNF; \
141 pc &= 0x00000000ffffffffUL; \ 133 pc &= 0x00000000ffffffffUL; \
142 sp &= 0x00000000ffffffffUL; \ 134 sp &= 0x00000000ffffffffUL; \
143\ 135 regs->tstate = (regs->tstate & (TSTATE_CWP))|(TSTATE_INITIAL_MM|TSTATE_IE|TSTATE_AM) | (__asi << 24UL); \
144 regs->tstate = (regs->tstate & (TSTATE_CWP))|(TSTATE_INITIAL_MM|TSTATE_IE|TSTATE_AM); \
145 regs->tpc = ((pc & (~3)) - 4); \ 136 regs->tpc = ((pc & (~3)) - 4); \
146 regs->tnpc = regs->tpc + 4; \ 137 regs->tnpc = regs->tpc + 4; \
147 regs->y = 0; \ 138 regs->y = 0; \
@@ -226,6 +217,8 @@ static inline void prefetchw(const void *x)
226 217
227#define spin_lock_prefetch(x) prefetchw(x) 218#define spin_lock_prefetch(x) prefetchw(x)
228 219
220#define HAVE_ARCH_PICK_MMAP_LAYOUT
221
229#endif /* !(__ASSEMBLY__) */ 222#endif /* !(__ASSEMBLY__) */
230 223
231#endif /* !(__ASM_SPARC64_PROCESSOR_H) */ 224#endif /* !(__ASM_SPARC64_PROCESSOR_H) */
diff --git a/include/asm-sparc64/pstate.h b/include/asm-sparc64/pstate.h
index 29fb74aa805d..49a7924a89ab 100644
--- a/include/asm-sparc64/pstate.h
+++ b/include/asm-sparc64/pstate.h
@@ -28,11 +28,12 @@
28 28
29/* The V9 TSTATE Register (with SpitFire and Linux extensions). 29/* The V9 TSTATE Register (with SpitFire and Linux extensions).
30 * 30 *
31 * --------------------------------------------------------------- 31 * ---------------------------------------------------------------------
32 * | Resv | CCR | ASI | %pil | PSTATE | Resv | CWP | 32 * | Resv | GL | CCR | ASI | %pil | PSTATE | Resv | CWP |
33 * --------------------------------------------------------------- 33 * ---------------------------------------------------------------------
34 * 63 40 39 32 31 24 23 20 19 8 7 5 4 0 34 * 63 43 42 40 39 32 31 24 23 20 19 8 7 5 4 0
35 */ 35 */
36#define TSTATE_GL _AC(0x0000070000000000,UL) /* Global reg level */
36#define TSTATE_CCR _AC(0x000000ff00000000,UL) /* Condition Codes. */ 37#define TSTATE_CCR _AC(0x000000ff00000000,UL) /* Condition Codes. */
37#define TSTATE_XCC _AC(0x000000f000000000,UL) /* Condition Codes. */ 38#define TSTATE_XCC _AC(0x000000f000000000,UL) /* Condition Codes. */
38#define TSTATE_XNEG _AC(0x0000008000000000,UL) /* %xcc Negative. */ 39#define TSTATE_XNEG _AC(0x0000008000000000,UL) /* %xcc Negative. */
diff --git a/include/asm-sparc64/scratchpad.h b/include/asm-sparc64/scratchpad.h
new file mode 100644
index 000000000000..5e8b01fb3343
--- /dev/null
+++ b/include/asm-sparc64/scratchpad.h
@@ -0,0 +1,14 @@
1#ifndef _SPARC64_SCRATCHPAD_H
2#define _SPARC64_SCRATCHPAD_H
3
4/* Sun4v scratchpad registers, accessed via ASI_SCRATCHPAD. */
5
6#define SCRATCHPAD_MMU_MISS 0x00 /* Shared with OBP - set by OBP */
7#define SCRATCHPAD_CPUID 0x08 /* Shared with OBP - set by hypervisor */
8#define SCRATCHPAD_UTSBREG1 0x10
9#define SCRATCHPAD_UTSBREG2 0x18
10 /* 0x20 and 0x28, hypervisor only... */
11#define SCRATCHPAD_UNUSED1 0x30
12#define SCRATCHPAD_UNUSED2 0x38 /* Reserved for OBP */
13
14#endif /* !(_SPARC64_SCRATCHPAD_H) */
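For illustration, a hedged sketch of how one of these scratchpad slots could be read from C; the ldxa-with-immediate-ASI pattern mirrors the trap-table code later in this patch, while the helper name and the assumption that ASI_SCRATCHPAD is in scope here are not from the source.

static inline unsigned long scratchpad_read_sketch(unsigned long offset)
{
	unsigned long val;

	/* e.g. offset == SCRATCHPAD_CPUID to fetch this cpu's id */
	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=r" (val)
			     : "r" (offset), "i" (ASI_SCRATCHPAD));
	return val;
}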
diff --git a/include/asm-sparc64/smp.h b/include/asm-sparc64/smp.h
index 473edb2603ec..89d86ecaab24 100644
--- a/include/asm-sparc64/smp.h
+++ b/include/asm-sparc64/smp.h
@@ -33,37 +33,13 @@
33extern cpumask_t phys_cpu_present_map; 33extern cpumask_t phys_cpu_present_map;
34#define cpu_possible_map phys_cpu_present_map 34#define cpu_possible_map phys_cpu_present_map
35 35
36extern cpumask_t cpu_sibling_map[NR_CPUS];
37
36/* 38/*
37 * General functions that each host system must provide. 39 * General functions that each host system must provide.
38 */ 40 */
39 41
40static __inline__ int hard_smp_processor_id(void) 42extern int hard_smp_processor_id(void);
41{
42 if (tlb_type == cheetah || tlb_type == cheetah_plus) {
43 unsigned long cfg, ver;
44 __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
45 if ((ver >> 32) == 0x003e0016) {
46 __asm__ __volatile__("ldxa [%%g0] %1, %0"
47 : "=r" (cfg)
48 : "i" (ASI_JBUS_CONFIG));
49 return ((cfg >> 17) & 0x1f);
50 } else {
51 __asm__ __volatile__("ldxa [%%g0] %1, %0"
52 : "=r" (cfg)
53 : "i" (ASI_SAFARI_CONFIG));
54 return ((cfg >> 17) & 0x3ff);
55 }
56 } else if (this_is_starfire != 0) {
57 return starfire_hard_smp_processor_id();
58 } else {
59 unsigned long upaconfig;
60 __asm__ __volatile__("ldxa [%%g0] %1, %0"
61 : "=r" (upaconfig)
62 : "i" (ASI_UPA_CONFIG));
63 return ((upaconfig >> 17) & 0x1f);
64 }
65}
66
67#define raw_smp_processor_id() (current_thread_info()->cpu) 43#define raw_smp_processor_id() (current_thread_info()->cpu)
68 44
69extern void smp_setup_cpu_possible_map(void); 45extern void smp_setup_cpu_possible_map(void);
diff --git a/include/asm-sparc64/sparsemem.h b/include/asm-sparc64/sparsemem.h
new file mode 100644
index 000000000000..ed5c9d8541e2
--- /dev/null
+++ b/include/asm-sparc64/sparsemem.h
@@ -0,0 +1,12 @@
1#ifndef _SPARC64_SPARSEMEM_H
2#define _SPARC64_SPARSEMEM_H
3
4#ifdef __KERNEL__
5
6#define SECTION_SIZE_BITS 26
7#define MAX_PHYSADDR_BITS 42
8#define MAX_PHYSMEM_BITS 42
9
10#endif /* !(__KERNEL__) */
11
12#endif /* !(_SPARC64_SPARSEMEM_H) */
diff --git a/include/asm-sparc64/spitfire.h b/include/asm-sparc64/spitfire.h
index 962638c9d122..23ad8a7987ad 100644
--- a/include/asm-sparc64/spitfire.h
+++ b/include/asm-sparc64/spitfire.h
@@ -44,6 +44,7 @@ enum ultra_tlb_layout {
44 spitfire = 0, 44 spitfire = 0,
45 cheetah = 1, 45 cheetah = 1,
46 cheetah_plus = 2, 46 cheetah_plus = 2,
47 hypervisor = 3,
47}; 48};
48 49
49extern enum ultra_tlb_layout tlb_type; 50extern enum ultra_tlb_layout tlb_type;
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index af254e581834..a18ec87a52c1 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -209,9 +209,10 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
209 /* so that ASI is only written if it changes, think again. */ \ 209 /* so that ASI is only written if it changes, think again. */ \
210 __asm__ __volatile__("wr %%g0, %0, %%asi" \ 210 __asm__ __volatile__("wr %%g0, %0, %%asi" \
211 : : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\ 211 : : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\
212 trap_block[current_thread_info()->cpu].thread = \
213 task_thread_info(next); \
212 __asm__ __volatile__( \ 214 __asm__ __volatile__( \
213 "mov %%g4, %%g7\n\t" \ 215 "mov %%g4, %%g7\n\t" \
214 "wrpr %%g0, 0x95, %%pstate\n\t" \
215 "stx %%i6, [%%sp + 2047 + 0x70]\n\t" \ 216 "stx %%i6, [%%sp + 2047 + 0x70]\n\t" \
216 "stx %%i7, [%%sp + 2047 + 0x78]\n\t" \ 217 "stx %%i7, [%%sp + 2047 + 0x78]\n\t" \
217 "rdpr %%wstate, %%o5\n\t" \ 218 "rdpr %%wstate, %%o5\n\t" \
@@ -225,14 +226,10 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
225 "ldx [%%g6 + %3], %%o6\n\t" \ 226 "ldx [%%g6 + %3], %%o6\n\t" \
226 "ldub [%%g6 + %2], %%o5\n\t" \ 227 "ldub [%%g6 + %2], %%o5\n\t" \
227 "ldub [%%g6 + %4], %%o7\n\t" \ 228 "ldub [%%g6 + %4], %%o7\n\t" \
228 "mov %%g6, %%l2\n\t" \
229 "wrpr %%o5, 0x0, %%wstate\n\t" \ 229 "wrpr %%o5, 0x0, %%wstate\n\t" \
230 "ldx [%%sp + 2047 + 0x70], %%i6\n\t" \ 230 "ldx [%%sp + 2047 + 0x70], %%i6\n\t" \
231 "ldx [%%sp + 2047 + 0x78], %%i7\n\t" \ 231 "ldx [%%sp + 2047 + 0x78], %%i7\n\t" \
232 "wrpr %%g0, 0x94, %%pstate\n\t" \
233 "mov %%l2, %%g6\n\t" \
234 "ldx [%%g6 + %6], %%g4\n\t" \ 232 "ldx [%%g6 + %6], %%g4\n\t" \
235 "wrpr %%g0, 0x96, %%pstate\n\t" \
236 "brz,pt %%o7, 1f\n\t" \ 233 "brz,pt %%o7, 1f\n\t" \
237 " mov %%g7, %0\n\t" \ 234 " mov %%g7, %0\n\t" \
238 "b,a ret_from_syscall\n\t" \ 235 "b,a ret_from_syscall\n\t" \
diff --git a/include/asm-sparc64/thread_info.h b/include/asm-sparc64/thread_info.h
index ac9d068aab4f..2ebf7f27bf91 100644
--- a/include/asm-sparc64/thread_info.h
+++ b/include/asm-sparc64/thread_info.h
@@ -64,8 +64,6 @@ struct thread_info {
64 __u64 kernel_cntd0, kernel_cntd1; 64 __u64 kernel_cntd0, kernel_cntd1;
65 __u64 pcr_reg; 65 __u64 pcr_reg;
66 66
67 __u64 cee_stuff;
68
69 struct restart_block restart_block; 67 struct restart_block restart_block;
70 68
71 struct pt_regs *kern_una_regs; 69 struct pt_regs *kern_una_regs;
@@ -104,10 +102,9 @@ struct thread_info {
104#define TI_KERN_CNTD0 0x00000480 102#define TI_KERN_CNTD0 0x00000480
105#define TI_KERN_CNTD1 0x00000488 103#define TI_KERN_CNTD1 0x00000488
106#define TI_PCR 0x00000490 104#define TI_PCR 0x00000490
107#define TI_CEE_STUFF 0x00000498 105#define TI_RESTART_BLOCK 0x00000498
108#define TI_RESTART_BLOCK 0x000004a0 106#define TI_KUNA_REGS 0x000004c0
109#define TI_KUNA_REGS 0x000004c8 107#define TI_KUNA_INSN 0x000004c8
110#define TI_KUNA_INSN 0x000004d0
111#define TI_FPREGS 0x00000500 108#define TI_FPREGS 0x00000500
112 109
113/* We embed this in the uppermost byte of thread_info->flags */ 110/* We embed this in the uppermost byte of thread_info->flags */
diff --git a/include/asm-sparc64/timex.h b/include/asm-sparc64/timex.h
index 9e8d4175bcb2..2a5e4ebaad80 100644
--- a/include/asm-sparc64/timex.h
+++ b/include/asm-sparc64/timex.h
@@ -14,4 +14,10 @@
14typedef unsigned long cycles_t; 14typedef unsigned long cycles_t;
15#define get_cycles() tick_ops->get_tick() 15#define get_cycles() tick_ops->get_tick()
16 16
17#define ARCH_HAS_READ_CURRENT_TIMER 1
18#define read_current_timer(timer_val_p) \
19({ *timer_val_p = tick_ops->get_tick(); \
20 0; \
21})
22
17#endif 23#endif
diff --git a/include/asm-sparc64/tlbflush.h b/include/asm-sparc64/tlbflush.h
index 3ef9909ac3ac..9ad5d9c51d42 100644
--- a/include/asm-sparc64/tlbflush.h
+++ b/include/asm-sparc64/tlbflush.h
@@ -5,6 +5,11 @@
5#include <linux/mm.h> 5#include <linux/mm.h>
6#include <asm/mmu_context.h> 6#include <asm/mmu_context.h>
7 7
8/* TSB flush operations. */
9struct mmu_gather;
10extern void flush_tsb_kernel_range(unsigned long start, unsigned long end);
11extern void flush_tsb_user(struct mmu_gather *mp);
12
8/* TLB flush operations. */ 13/* TLB flush operations. */
9 14
10extern void flush_tlb_pending(void); 15extern void flush_tlb_pending(void);
@@ -14,28 +19,36 @@ extern void flush_tlb_pending(void);
14#define flush_tlb_page(vma,addr) flush_tlb_pending() 19#define flush_tlb_page(vma,addr) flush_tlb_pending()
15#define flush_tlb_mm(mm) flush_tlb_pending() 20#define flush_tlb_mm(mm) flush_tlb_pending()
16 21
22/* Local cpu only. */
17extern void __flush_tlb_all(void); 23extern void __flush_tlb_all(void);
24
18extern void __flush_tlb_page(unsigned long context, unsigned long page, unsigned long r); 25extern void __flush_tlb_page(unsigned long context, unsigned long page, unsigned long r);
19 26
20extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end); 27extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
21 28
22#ifndef CONFIG_SMP 29#ifndef CONFIG_SMP
23 30
24#define flush_tlb_all() __flush_tlb_all()
25#define flush_tlb_kernel_range(start,end) \ 31#define flush_tlb_kernel_range(start,end) \
26 __flush_tlb_kernel_range(start,end) 32do { flush_tsb_kernel_range(start,end); \
33 __flush_tlb_kernel_range(start,end); \
34} while (0)
27 35
28#else /* CONFIG_SMP */ 36#else /* CONFIG_SMP */
29 37
30extern void smp_flush_tlb_all(void);
31extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end); 38extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
32 39
33#define flush_tlb_all() smp_flush_tlb_all()
34#define flush_tlb_kernel_range(start, end) \ 40#define flush_tlb_kernel_range(start, end) \
35 smp_flush_tlb_kernel_range(start, end) 41do { flush_tsb_kernel_range(start,end); \
42 smp_flush_tlb_kernel_range(start, end); \
43} while (0)
36 44
37#endif /* ! CONFIG_SMP */ 45#endif /* ! CONFIG_SMP */
38 46
39extern void flush_tlb_pgtables(struct mm_struct *, unsigned long, unsigned long); 47static inline void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
48{
49 /* We don't use virtual page tables for TLB miss processing
50 * any more. Nowadays we use the TSB.
51 */
52}
40 53
41#endif /* _SPARC64_TLBFLUSH_H */ 54#endif /* _SPARC64_TLBFLUSH_H */
diff --git a/include/asm-sparc64/tsb.h b/include/asm-sparc64/tsb.h
new file mode 100644
index 000000000000..e82612cd9f33
--- /dev/null
+++ b/include/asm-sparc64/tsb.h
@@ -0,0 +1,281 @@
1#ifndef _SPARC64_TSB_H
2#define _SPARC64_TSB_H
3
4/* The sparc64 TSB is similar to the powerpc hashtables. It's a
5 * power-of-2 sized table of TAG/PTE pairs. The cpu precomputes
6 * pointers into this table for 8K and 64K page sizes, and also a
7 * comparison TAG based upon the virtual address and context which
8 * faults.
9 *
10 * TLB miss trap handler software does the actual lookup via something
11 * of the form:
12 *
13 * ldxa [%g0] ASI_{D,I}MMU_TSB_8KB_PTR, %g1
14 * ldxa [%g0] ASI_{D,I}MMU, %g6
15 * sllx %g6, 22, %g6
16 * srlx %g6, 22, %g6
17 * ldda [%g1] ASI_NUCLEUS_QUAD_LDD, %g4
18 * cmp %g4, %g6
19 * bne,pn %xcc, tsb_miss_{d,i}tlb
20 * mov FAULT_CODE_{D,I}TLB, %g3
21 * stxa %g5, [%g0] ASI_{D,I}TLB_DATA_IN
22 * retry
23 *
24 *
25 * Each 16-byte slot of the TSB is the 8-byte tag and then the 8-byte
26 * PTE. The TAG is of the same layout as the TLB TAG TARGET mmu
27 * register which is:
28 *
29 * -------------------------------------------------
30 * | - | CONTEXT | - | VADDR bits 63:22 |
31 * -------------------------------------------------
32 * 63 61 60 48 47 42 41 0
33 *
 34 * But actually, since we use per-mm TSBs, we zero out the CONTEXT
35 * field.
36 *
37 * Like the powerpc hashtables we need to use locking in order to
38 * synchronize while we update the entries. PTE updates need locking
39 * as well.
40 *
 41 * We need to carefully choose a lock bit for the TSB entry.  We
42 * choose to use bit 47 in the tag. Also, since we never map anything
43 * at page zero in context zero, we use zero as an invalid tag entry.
44 * When the lock bit is set, this forces a tag comparison failure.
45 */
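A C-level sketch of the lookup described above: the 16-byte tag/PTE entry layout and the vaddr>>22 tag come from this comment, and the index hash on the low virtual-address bits matches the KERN_TSB_LOOKUP_TL1 macro further down; the struct name, entry count, and 8K PAGE_SHIFT are assumptions.

struct tsb_entry_sketch {
	unsigned long tag;	/* vaddr bits 63:22, CONTEXT kept zero */
	unsigned long pte;
};

static unsigned long tsb_lookup_sketch(struct tsb_entry_sketch *tsb,
					unsigned long nentries,
					unsigned long vaddr)
{
	unsigned long idx = (vaddr >> 13 /* PAGE_SHIFT */) & (nentries - 1);
	unsigned long tag = vaddr >> 22;

	if (tsb[idx].tag == tag)
		return tsb[idx].pte;	/* hit: this TTE goes into the TLB */
	return 0;			/* miss: fall back to the page tables */
}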
46
47#define TSB_TAG_LOCK_BIT 47
48#define TSB_TAG_LOCK_HIGH (1 << (TSB_TAG_LOCK_BIT - 32))
49
50#define TSB_TAG_INVALID_BIT 46
51#define TSB_TAG_INVALID_HIGH (1 << (TSB_TAG_INVALID_BIT - 32))
52
53#define TSB_MEMBAR membar #StoreStore
54
55/* Some cpus support physical address quad loads. We want to use
56 * those if possible so we don't need to hard-lock the TSB mapping
57 * into the TLB. We encode some instruction patching in order to
58 * support this.
59 *
60 * The kernel TSB is locked into the TLB by virtue of being in the
61 * kernel image, so we don't play these games for swapper_tsb access.
62 */
63#ifndef __ASSEMBLY__
64struct tsb_ldquad_phys_patch_entry {
65 unsigned int addr;
66 unsigned int sun4u_insn;
67 unsigned int sun4v_insn;
68};
69extern struct tsb_ldquad_phys_patch_entry __tsb_ldquad_phys_patch,
70 __tsb_ldquad_phys_patch_end;
71
72struct tsb_phys_patch_entry {
73 unsigned int addr;
74 unsigned int insn;
75};
76extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
77#endif
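A hypothetical sketch of how such a patch table would be consumed at boot; the real routine lives elsewhere in this series and chooses between the sun4u and sun4v encodings more carefully, so the function name and the simple is_sun4v switch are assumptions.

static void apply_ldquad_patch_sketch(int is_sun4v)
{
	struct tsb_ldquad_phys_patch_entry *p = &__tsb_ldquad_phys_patch;

	while (p < &__tsb_ldquad_phys_patch_end) {
		unsigned int *insn = (unsigned int *)(unsigned long) p->addr;

		/* overwrite the default ASI_NUCLEUS_QUAD_LDD instruction */
		*insn = is_sun4v ? p->sun4v_insn : p->sun4u_insn;
		__asm__ __volatile__("flush %0" : : "r" (insn));
		p++;
	}
}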
78#define TSB_LOAD_QUAD(TSB, REG) \
79661: ldda [TSB] ASI_NUCLEUS_QUAD_LDD, REG; \
80 .section .tsb_ldquad_phys_patch, "ax"; \
81 .word 661b; \
82 ldda [TSB] ASI_QUAD_LDD_PHYS, REG; \
83 ldda [TSB] ASI_QUAD_LDD_PHYS_4V, REG; \
84 .previous
85
86#define TSB_LOAD_TAG_HIGH(TSB, REG) \
87661: lduwa [TSB] ASI_N, REG; \
88 .section .tsb_phys_patch, "ax"; \
89 .word 661b; \
90 lduwa [TSB] ASI_PHYS_USE_EC, REG; \
91 .previous
92
93#define TSB_LOAD_TAG(TSB, REG) \
94661: ldxa [TSB] ASI_N, REG; \
95 .section .tsb_phys_patch, "ax"; \
96 .word 661b; \
97 ldxa [TSB] ASI_PHYS_USE_EC, REG; \
98 .previous
99
100#define TSB_CAS_TAG_HIGH(TSB, REG1, REG2) \
101661: casa [TSB] ASI_N, REG1, REG2; \
102 .section .tsb_phys_patch, "ax"; \
103 .word 661b; \
104 casa [TSB] ASI_PHYS_USE_EC, REG1, REG2; \
105 .previous
106
107#define TSB_CAS_TAG(TSB, REG1, REG2) \
108661: casxa [TSB] ASI_N, REG1, REG2; \
109 .section .tsb_phys_patch, "ax"; \
110 .word 661b; \
111 casxa [TSB] ASI_PHYS_USE_EC, REG1, REG2; \
112 .previous
113
114#define TSB_STORE(ADDR, VAL) \
115661: stxa VAL, [ADDR] ASI_N; \
116 .section .tsb_phys_patch, "ax"; \
117 .word 661b; \
118 stxa VAL, [ADDR] ASI_PHYS_USE_EC; \
119 .previous
120
121#define TSB_LOCK_TAG(TSB, REG1, REG2) \
12299: TSB_LOAD_TAG_HIGH(TSB, REG1); \
123 sethi %hi(TSB_TAG_LOCK_HIGH), REG2;\
124 andcc REG1, REG2, %g0; \
125 bne,pn %icc, 99b; \
126 nop; \
127 TSB_CAS_TAG_HIGH(TSB, REG1, REG2); \
128 cmp REG1, REG2; \
129 bne,pn %icc, 99b; \
130 nop; \
131 TSB_MEMBAR
132
133#define TSB_WRITE(TSB, TTE, TAG) \
134 add TSB, 0x8, TSB; \
135 TSB_STORE(TSB, TTE); \
136 sub TSB, 0x8, TSB; \
137 TSB_MEMBAR; \
138 TSB_STORE(TSB, TAG);
139
140#define KTSB_LOAD_QUAD(TSB, REG) \
141 ldda [TSB] ASI_NUCLEUS_QUAD_LDD, REG;
142
143#define KTSB_STORE(ADDR, VAL) \
144 stxa VAL, [ADDR] ASI_N;
145
146#define KTSB_LOCK_TAG(TSB, REG1, REG2) \
14799: lduwa [TSB] ASI_N, REG1; \
148 sethi %hi(TSB_TAG_LOCK_HIGH), REG2;\
149 andcc REG1, REG2, %g0; \
150 bne,pn %icc, 99b; \
151 nop; \
152 casa [TSB] ASI_N, REG1, REG2;\
153 cmp REG1, REG2; \
154 bne,pn %icc, 99b; \
155 nop; \
156 TSB_MEMBAR
157
158#define KTSB_WRITE(TSB, TTE, TAG) \
159 add TSB, 0x8, TSB; \
160 stxa TTE, [TSB] ASI_N; \
161 sub TSB, 0x8, TSB; \
162 TSB_MEMBAR; \
163 stxa TAG, [TSB] ASI_N;
164
165 /* Do a kernel page table walk. Leaves physical PTE pointer in
166 * REG1. Jumps to FAIL_LABEL on early page table walk termination.
167 * VADDR will not be clobbered, but REG2 will.
168 */
169#define KERN_PGTABLE_WALK(VADDR, REG1, REG2, FAIL_LABEL) \
170 sethi %hi(swapper_pg_dir), REG1; \
171 or REG1, %lo(swapper_pg_dir), REG1; \
172 sllx VADDR, 64 - (PGDIR_SHIFT + PGDIR_BITS), REG2; \
173 srlx REG2, 64 - PAGE_SHIFT, REG2; \
174 andn REG2, 0x3, REG2; \
175 lduw [REG1 + REG2], REG1; \
176 brz,pn REG1, FAIL_LABEL; \
177 sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
178 srlx REG2, 64 - PAGE_SHIFT, REG2; \
179 sllx REG1, 11, REG1; \
180 andn REG2, 0x3, REG2; \
181 lduwa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
182 brz,pn REG1, FAIL_LABEL; \
183 sllx VADDR, 64 - PMD_SHIFT, REG2; \
184 srlx REG2, 64 - PAGE_SHIFT, REG2; \
185 sllx REG1, 11, REG1; \
186 andn REG2, 0x7, REG2; \
187 add REG1, REG2, REG1;
188
189 /* Do a user page table walk in MMU globals. Leaves physical PTE
190 * pointer in REG1. Jumps to FAIL_LABEL on early page table walk
191 * termination. Physical base of page tables is in PHYS_PGD which
192 * will not be modified.
193 *
194 * VADDR will not be clobbered, but REG1 and REG2 will.
195 */
196#define USER_PGTABLE_WALK_TL1(VADDR, PHYS_PGD, REG1, REG2, FAIL_LABEL) \
197 sllx VADDR, 64 - (PGDIR_SHIFT + PGDIR_BITS), REG2; \
198 srlx REG2, 64 - PAGE_SHIFT, REG2; \
199 andn REG2, 0x3, REG2; \
200 lduwa [PHYS_PGD + REG2] ASI_PHYS_USE_EC, REG1; \
201 brz,pn REG1, FAIL_LABEL; \
202 sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
203 srlx REG2, 64 - PAGE_SHIFT, REG2; \
204 sllx REG1, 11, REG1; \
205 andn REG2, 0x3, REG2; \
206 lduwa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
207 brz,pn REG1, FAIL_LABEL; \
208 sllx VADDR, 64 - PMD_SHIFT, REG2; \
209 srlx REG2, 64 - PAGE_SHIFT, REG2; \
210 sllx REG1, 11, REG1; \
211 andn REG2, 0x7, REG2; \
212 add REG1, REG2, REG1;
213
214/* Look up an OBP mapping on VADDR in the prom_trans[] table at TL>0.
215 * If no entry is found, FAIL_LABEL will be branched to. On success
216 * the resulting PTE value will be left in REG1. VADDR is preserved
217 * by this routine.
218 */
219#define OBP_TRANS_LOOKUP(VADDR, REG1, REG2, REG3, FAIL_LABEL) \
220 sethi %hi(prom_trans), REG1; \
221 or REG1, %lo(prom_trans), REG1; \
22297: ldx [REG1 + 0x00], REG2; \
223 brz,pn REG2, FAIL_LABEL; \
224 nop; \
225 ldx [REG1 + 0x08], REG3; \
226 add REG2, REG3, REG3; \
227 cmp REG2, VADDR; \
228 bgu,pt %xcc, 98f; \
229 cmp VADDR, REG3; \
230 bgeu,pt %xcc, 98f; \
231 ldx [REG1 + 0x10], REG3; \
232 sub VADDR, REG2, REG2; \
233 ba,pt %xcc, 99f; \
234 add REG3, REG2, REG1; \
23598: ba,pt %xcc, 97b; \
236 add REG1, (3 * 8), REG1; \
23799:
238
 239	/* We use a 32K TSB for the whole kernel; this allows us to
240 * handle about 16MB of modules and vmalloc mappings without
241 * incurring many hash conflicts.
242 */
243#define KERNEL_TSB_SIZE_BYTES (32 * 1024)
244#define KERNEL_TSB_NENTRIES \
245 (KERNEL_TSB_SIZE_BYTES / 16)
246#define KERNEL_TSB4M_NENTRIES 4096
247
248 /* Do a kernel TSB lookup at tl>0 on VADDR+TAG, branch to OK_LABEL
249 * on TSB hit. REG1, REG2, REG3, and REG4 are used as temporaries
250 * and the found TTE will be left in REG1. REG3 and REG4 must
251 * be an even/odd pair of registers.
252 *
253 * VADDR and TAG will be preserved and not clobbered by this macro.
254 */
255#define KERN_TSB_LOOKUP_TL1(VADDR, TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
256 sethi %hi(swapper_tsb), REG1; \
257 or REG1, %lo(swapper_tsb), REG1; \
258 srlx VADDR, PAGE_SHIFT, REG2; \
259 and REG2, (KERNEL_TSB_NENTRIES - 1), REG2; \
260 sllx REG2, 4, REG2; \
261 add REG1, REG2, REG2; \
262 KTSB_LOAD_QUAD(REG2, REG3); \
263 cmp REG3, TAG; \
264 be,a,pt %xcc, OK_LABEL; \
265 mov REG4, REG1;
266
 267	/* This version uses a trick: the TAG is already (VADDR >> 22), so
268 * we can make use of that for the index computation.
269 */
270#define KERN_TSB4M_LOOKUP_TL1(TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
271 sethi %hi(swapper_4m_tsb), REG1; \
272 or REG1, %lo(swapper_4m_tsb), REG1; \
273 and TAG, (KERNEL_TSB_NENTRIES - 1), REG2; \
274 sllx REG2, 4, REG2; \
275 add REG1, REG2, REG2; \
276 KTSB_LOAD_QUAD(REG2, REG3); \
277 cmp REG3, TAG; \
278 be,a,pt %xcc, OK_LABEL; \
279 mov REG4, REG1;
280
281#endif /* !(_SPARC64_TSB_H) */
diff --git a/include/asm-sparc64/ttable.h b/include/asm-sparc64/ttable.h
index 2784f80094c3..2d5e3c464df5 100644
--- a/include/asm-sparc64/ttable.h
+++ b/include/asm-sparc64/ttable.h
@@ -93,7 +93,7 @@
93 93
94#define SYSCALL_TRAP(routine, systbl) \ 94#define SYSCALL_TRAP(routine, systbl) \
95 sethi %hi(109f), %g7; \ 95 sethi %hi(109f), %g7; \
96 ba,pt %xcc, scetrap; \ 96 ba,pt %xcc, etrap; \
97109: or %g7, %lo(109b), %g7; \ 97109: or %g7, %lo(109b), %g7; \
98 sethi %hi(systbl), %l7; \ 98 sethi %hi(systbl), %l7; \
99 ba,pt %xcc, routine; \ 99 ba,pt %xcc, routine; \
@@ -109,14 +109,14 @@
109 nop;nop;nop; 109 nop;nop;nop;
110 110
111#define TRAP_UTRAP(handler,lvl) \ 111#define TRAP_UTRAP(handler,lvl) \
112 ldx [%g6 + TI_UTRAPS], %g1; \ 112 mov handler, %g3; \
113 sethi %hi(109f), %g7; \ 113 ba,pt %xcc, utrap_trap; \
114 brz,pn %g1, utrap; \ 114 mov lvl, %g4; \
115 or %g7, %lo(109f), %g7; \ 115 nop; \
116 ba,pt %xcc, utrap; \ 116 nop; \
117109: ldx [%g1 + handler*8], %g1; \ 117 nop; \
118 ba,pt %xcc, utrap_ill; \ 118 nop; \
119 mov lvl, %o1; 119 nop;
120 120
121#ifdef CONFIG_SUNOS_EMUL 121#ifdef CONFIG_SUNOS_EMUL
122#define SUNOS_SYSCALL_TRAP SYSCALL_TRAP(linux_sparc_syscall32, sunos_sys_table) 122#define SUNOS_SYSCALL_TRAP SYSCALL_TRAP(linux_sparc_syscall32, sunos_sys_table)
@@ -136,8 +136,6 @@
136#else 136#else
137#define SOLARIS_SYSCALL_TRAP TRAP(solaris_syscall) 137#define SOLARIS_SYSCALL_TRAP TRAP(solaris_syscall)
138#endif 138#endif
139/* FIXME: Write these actually */
140#define NETBSD_SYSCALL_TRAP TRAP(netbsd_syscall)
141#define BREAKPOINT_TRAP TRAP(breakpoint_trap) 139#define BREAKPOINT_TRAP TRAP(breakpoint_trap)
142 140
143#define TRAP_IRQ(routine, level) \ 141#define TRAP_IRQ(routine, level) \
@@ -182,6 +180,26 @@
182#define KPROBES_TRAP(lvl) TRAP_ARG(bad_trap, lvl) 180#define KPROBES_TRAP(lvl) TRAP_ARG(bad_trap, lvl)
183#endif 181#endif
184 182
183#define SUN4V_ITSB_MISS \
184 ldxa [%g0] ASI_SCRATCHPAD, %g2; \
185 ldx [%g2 + HV_FAULT_I_ADDR_OFFSET], %g4; \
186 ldx [%g2 + HV_FAULT_I_CTX_OFFSET], %g5; \
187 srlx %g4, 22, %g6; \
188 ba,pt %xcc, sun4v_itsb_miss; \
189 nop; \
190 nop; \
191 nop;
192
193#define SUN4V_DTSB_MISS \
194 ldxa [%g0] ASI_SCRATCHPAD, %g2; \
195 ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4; \
196 ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5; \
197 srlx %g4, 22, %g6; \
198 ba,pt %xcc, sun4v_dtsb_miss; \
199 nop; \
200 nop; \
201 nop;
202
185/* Before touching these macros, you owe it to yourself to go and 203/* Before touching these macros, you owe it to yourself to go and
186 * see how arch/sparc64/kernel/winfixup.S works... -DaveM 204 * see how arch/sparc64/kernel/winfixup.S works... -DaveM
187 * 205 *
@@ -221,6 +239,31 @@
221 saved; retry; nop; nop; nop; nop; nop; nop; \ 239 saved; retry; nop; nop; nop; nop; nop; nop; \
222 nop; nop; nop; nop; nop; nop; nop; nop; 240 nop; nop; nop; nop; nop; nop; nop; nop;
223 241
242#define SPILL_0_NORMAL_ETRAP \
243etrap_kernel_spill: \
244 stx %l0, [%sp + STACK_BIAS + 0x00]; \
245 stx %l1, [%sp + STACK_BIAS + 0x08]; \
246 stx %l2, [%sp + STACK_BIAS + 0x10]; \
247 stx %l3, [%sp + STACK_BIAS + 0x18]; \
248 stx %l4, [%sp + STACK_BIAS + 0x20]; \
249 stx %l5, [%sp + STACK_BIAS + 0x28]; \
250 stx %l6, [%sp + STACK_BIAS + 0x30]; \
251 stx %l7, [%sp + STACK_BIAS + 0x38]; \
252 stx %i0, [%sp + STACK_BIAS + 0x40]; \
253 stx %i1, [%sp + STACK_BIAS + 0x48]; \
254 stx %i2, [%sp + STACK_BIAS + 0x50]; \
255 stx %i3, [%sp + STACK_BIAS + 0x58]; \
256 stx %i4, [%sp + STACK_BIAS + 0x60]; \
257 stx %i5, [%sp + STACK_BIAS + 0x68]; \
258 stx %i6, [%sp + STACK_BIAS + 0x70]; \
259 stx %i7, [%sp + STACK_BIAS + 0x78]; \
260 saved; \
261 sub %g1, 2, %g1; \
262 ba,pt %xcc, etrap_save; \
263 wrpr %g1, %cwp; \
264 nop; nop; nop; nop; nop; nop; nop; nop; \
265 nop; nop; nop; nop;
266
224/* Normal 64bit spill */ 267/* Normal 64bit spill */
225#define SPILL_1_GENERIC(ASI) \ 268#define SPILL_1_GENERIC(ASI) \
226 add %sp, STACK_BIAS + 0x00, %g1; \ 269 add %sp, STACK_BIAS + 0x00, %g1; \
@@ -254,6 +297,67 @@
254 b,a,pt %xcc, spill_fixup_mna; \ 297 b,a,pt %xcc, spill_fixup_mna; \
255 b,a,pt %xcc, spill_fixup; 298 b,a,pt %xcc, spill_fixup;
256 299
300#define SPILL_1_GENERIC_ETRAP \
301etrap_user_spill_64bit: \
302 stxa %l0, [%sp + STACK_BIAS + 0x00] %asi; \
303 stxa %l1, [%sp + STACK_BIAS + 0x08] %asi; \
304 stxa %l2, [%sp + STACK_BIAS + 0x10] %asi; \
305 stxa %l3, [%sp + STACK_BIAS + 0x18] %asi; \
306 stxa %l4, [%sp + STACK_BIAS + 0x20] %asi; \
307 stxa %l5, [%sp + STACK_BIAS + 0x28] %asi; \
308 stxa %l6, [%sp + STACK_BIAS + 0x30] %asi; \
309 stxa %l7, [%sp + STACK_BIAS + 0x38] %asi; \
310 stxa %i0, [%sp + STACK_BIAS + 0x40] %asi; \
311 stxa %i1, [%sp + STACK_BIAS + 0x48] %asi; \
312 stxa %i2, [%sp + STACK_BIAS + 0x50] %asi; \
313 stxa %i3, [%sp + STACK_BIAS + 0x58] %asi; \
314 stxa %i4, [%sp + STACK_BIAS + 0x60] %asi; \
315 stxa %i5, [%sp + STACK_BIAS + 0x68] %asi; \
316 stxa %i6, [%sp + STACK_BIAS + 0x70] %asi; \
317 stxa %i7, [%sp + STACK_BIAS + 0x78] %asi; \
318 saved; \
319 sub %g1, 2, %g1; \
320 ba,pt %xcc, etrap_save; \
321 wrpr %g1, %cwp; \
322 nop; nop; nop; nop; nop; \
323 nop; nop; nop; nop; \
324 ba,a,pt %xcc, etrap_spill_fixup_64bit; \
325 ba,a,pt %xcc, etrap_spill_fixup_64bit; \
326 ba,a,pt %xcc, etrap_spill_fixup_64bit;
327
328#define SPILL_1_GENERIC_ETRAP_FIXUP \
329etrap_spill_fixup_64bit: \
330 ldub [%g6 + TI_WSAVED], %g1; \
331 sll %g1, 3, %g3; \
332 add %g6, %g3, %g3; \
333 stx %sp, [%g3 + TI_RWIN_SPTRS]; \
334 sll %g1, 7, %g3; \
335 add %g6, %g3, %g3; \
336 stx %l0, [%g3 + TI_REG_WINDOW + 0x00]; \
337 stx %l1, [%g3 + TI_REG_WINDOW + 0x08]; \
338 stx %l2, [%g3 + TI_REG_WINDOW + 0x10]; \
339 stx %l3, [%g3 + TI_REG_WINDOW + 0x18]; \
340 stx %l4, [%g3 + TI_REG_WINDOW + 0x20]; \
341 stx %l5, [%g3 + TI_REG_WINDOW + 0x28]; \
342 stx %l6, [%g3 + TI_REG_WINDOW + 0x30]; \
343 stx %l7, [%g3 + TI_REG_WINDOW + 0x38]; \
344 stx %i0, [%g3 + TI_REG_WINDOW + 0x40]; \
345 stx %i1, [%g3 + TI_REG_WINDOW + 0x48]; \
346 stx %i2, [%g3 + TI_REG_WINDOW + 0x50]; \
347 stx %i3, [%g3 + TI_REG_WINDOW + 0x58]; \
348 stx %i4, [%g3 + TI_REG_WINDOW + 0x60]; \
349 stx %i5, [%g3 + TI_REG_WINDOW + 0x68]; \
350 stx %i6, [%g3 + TI_REG_WINDOW + 0x70]; \
351 stx %i7, [%g3 + TI_REG_WINDOW + 0x78]; \
352 add %g1, 1, %g1; \
353 stb %g1, [%g6 + TI_WSAVED]; \
354 saved; \
355 rdpr %cwp, %g1; \
356 sub %g1, 2, %g1; \
357 ba,pt %xcc, etrap_save; \
358 wrpr %g1, %cwp; \
359 nop; nop; nop
360
257/* Normal 32bit spill */ 361/* Normal 32bit spill */
258#define SPILL_2_GENERIC(ASI) \ 362#define SPILL_2_GENERIC(ASI) \
259 srl %sp, 0, %sp; \ 363 srl %sp, 0, %sp; \
@@ -287,6 +391,68 @@
287 b,a,pt %xcc, spill_fixup_mna; \ 391 b,a,pt %xcc, spill_fixup_mna; \
288 b,a,pt %xcc, spill_fixup; 392 b,a,pt %xcc, spill_fixup;
289 393
394#define SPILL_2_GENERIC_ETRAP \
395etrap_user_spill_32bit: \
396 srl %sp, 0, %sp; \
397 stwa %l0, [%sp + 0x00] %asi; \
398 stwa %l1, [%sp + 0x04] %asi; \
399 stwa %l2, [%sp + 0x08] %asi; \
400 stwa %l3, [%sp + 0x0c] %asi; \
401 stwa %l4, [%sp + 0x10] %asi; \
402 stwa %l5, [%sp + 0x14] %asi; \
403 stwa %l6, [%sp + 0x18] %asi; \
404 stwa %l7, [%sp + 0x1c] %asi; \
405 stwa %i0, [%sp + 0x20] %asi; \
406 stwa %i1, [%sp + 0x24] %asi; \
407 stwa %i2, [%sp + 0x28] %asi; \
408 stwa %i3, [%sp + 0x2c] %asi; \
409 stwa %i4, [%sp + 0x30] %asi; \
410 stwa %i5, [%sp + 0x34] %asi; \
411 stwa %i6, [%sp + 0x38] %asi; \
412 stwa %i7, [%sp + 0x3c] %asi; \
413 saved; \
414 sub %g1, 2, %g1; \
415 ba,pt %xcc, etrap_save; \
416 wrpr %g1, %cwp; \
417 nop; nop; nop; nop; \
418 nop; nop; nop; nop; \
419 ba,a,pt %xcc, etrap_spill_fixup_32bit; \
420 ba,a,pt %xcc, etrap_spill_fixup_32bit; \
421 ba,a,pt %xcc, etrap_spill_fixup_32bit;
422
423#define SPILL_2_GENERIC_ETRAP_FIXUP \
424etrap_spill_fixup_32bit: \
425 ldub [%g6 + TI_WSAVED], %g1; \
426 sll %g1, 3, %g3; \
427 add %g6, %g3, %g3; \
428 stx %sp, [%g3 + TI_RWIN_SPTRS]; \
429 sll %g1, 7, %g3; \
430 add %g6, %g3, %g3; \
431 stw %l0, [%g3 + TI_REG_WINDOW + 0x00]; \
432 stw %l1, [%g3 + TI_REG_WINDOW + 0x04]; \
433 stw %l2, [%g3 + TI_REG_WINDOW + 0x08]; \
434 stw %l3, [%g3 + TI_REG_WINDOW + 0x0c]; \
435 stw %l4, [%g3 + TI_REG_WINDOW + 0x10]; \
436 stw %l5, [%g3 + TI_REG_WINDOW + 0x14]; \
437 stw %l6, [%g3 + TI_REG_WINDOW + 0x18]; \
438 stw %l7, [%g3 + TI_REG_WINDOW + 0x1c]; \
439 stw %i0, [%g3 + TI_REG_WINDOW + 0x20]; \
440 stw %i1, [%g3 + TI_REG_WINDOW + 0x24]; \
441 stw %i2, [%g3 + TI_REG_WINDOW + 0x28]; \
442 stw %i3, [%g3 + TI_REG_WINDOW + 0x2c]; \
443 stw %i4, [%g3 + TI_REG_WINDOW + 0x30]; \
444 stw %i5, [%g3 + TI_REG_WINDOW + 0x34]; \
445 stw %i6, [%g3 + TI_REG_WINDOW + 0x38]; \
446 stw %i7, [%g3 + TI_REG_WINDOW + 0x3c]; \
447 add %g1, 1, %g1; \
448 stb %g1, [%g6 + TI_WSAVED]; \
449 saved; \
450 rdpr %cwp, %g1; \
451 sub %g1, 2, %g1; \
452 ba,pt %xcc, etrap_save; \
453 wrpr %g1, %cwp; \
454 nop; nop; nop
455
290#define SPILL_1_NORMAL SPILL_1_GENERIC(ASI_AIUP) 456#define SPILL_1_NORMAL SPILL_1_GENERIC(ASI_AIUP)
291#define SPILL_2_NORMAL SPILL_2_GENERIC(ASI_AIUP) 457#define SPILL_2_NORMAL SPILL_2_GENERIC(ASI_AIUP)
292#define SPILL_3_NORMAL SPILL_0_NORMAL 458#define SPILL_3_NORMAL SPILL_0_NORMAL
@@ -325,6 +491,35 @@
325 restored; retry; nop; nop; nop; nop; nop; nop; \ 491 restored; retry; nop; nop; nop; nop; nop; nop; \
326 nop; nop; nop; nop; nop; nop; nop; nop; 492 nop; nop; nop; nop; nop; nop; nop; nop;
327 493
494#define FILL_0_NORMAL_RTRAP \
495kern_rtt_fill: \
496 rdpr %cwp, %g1; \
497 sub %g1, 1, %g1; \
498 wrpr %g1, %cwp; \
499 ldx [%sp + STACK_BIAS + 0x00], %l0; \
500 ldx [%sp + STACK_BIAS + 0x08], %l1; \
501 ldx [%sp + STACK_BIAS + 0x10], %l2; \
502 ldx [%sp + STACK_BIAS + 0x18], %l3; \
503 ldx [%sp + STACK_BIAS + 0x20], %l4; \
504 ldx [%sp + STACK_BIAS + 0x28], %l5; \
505 ldx [%sp + STACK_BIAS + 0x30], %l6; \
506 ldx [%sp + STACK_BIAS + 0x38], %l7; \
507 ldx [%sp + STACK_BIAS + 0x40], %i0; \
508 ldx [%sp + STACK_BIAS + 0x48], %i1; \
509 ldx [%sp + STACK_BIAS + 0x50], %i2; \
510 ldx [%sp + STACK_BIAS + 0x58], %i3; \
511 ldx [%sp + STACK_BIAS + 0x60], %i4; \
512 ldx [%sp + STACK_BIAS + 0x68], %i5; \
513 ldx [%sp + STACK_BIAS + 0x70], %i6; \
514 ldx [%sp + STACK_BIAS + 0x78], %i7; \
515 restored; \
516 add %g1, 1, %g1; \
517 ba,pt %xcc, kern_rtt_restore; \
518 wrpr %g1, %cwp; \
519 nop; nop; nop; nop; nop; \
520 nop; nop; nop; nop;
521
522
328/* Normal 64bit fill */ 523/* Normal 64bit fill */
329#define FILL_1_GENERIC(ASI) \ 524#define FILL_1_GENERIC(ASI) \
330 add %sp, STACK_BIAS + 0x00, %g1; \ 525 add %sp, STACK_BIAS + 0x00, %g1; \
@@ -356,6 +551,33 @@
356 b,a,pt %xcc, fill_fixup_mna; \ 551 b,a,pt %xcc, fill_fixup_mna; \
357 b,a,pt %xcc, fill_fixup; 552 b,a,pt %xcc, fill_fixup;
358 553
554#define FILL_1_GENERIC_RTRAP \
555user_rtt_fill_64bit: \
556 ldxa [%sp + STACK_BIAS + 0x00] %asi, %l0; \
557 ldxa [%sp + STACK_BIAS + 0x08] %asi, %l1; \
558 ldxa [%sp + STACK_BIAS + 0x10] %asi, %l2; \
559 ldxa [%sp + STACK_BIAS + 0x18] %asi, %l3; \
560 ldxa [%sp + STACK_BIAS + 0x20] %asi, %l4; \
561 ldxa [%sp + STACK_BIAS + 0x28] %asi, %l5; \
562 ldxa [%sp + STACK_BIAS + 0x30] %asi, %l6; \
563 ldxa [%sp + STACK_BIAS + 0x38] %asi, %l7; \
564 ldxa [%sp + STACK_BIAS + 0x40] %asi, %i0; \
565 ldxa [%sp + STACK_BIAS + 0x48] %asi, %i1; \
566 ldxa [%sp + STACK_BIAS + 0x50] %asi, %i2; \
567 ldxa [%sp + STACK_BIAS + 0x58] %asi, %i3; \
568 ldxa [%sp + STACK_BIAS + 0x60] %asi, %i4; \
569 ldxa [%sp + STACK_BIAS + 0x68] %asi, %i5; \
570 ldxa [%sp + STACK_BIAS + 0x70] %asi, %i6; \
571 ldxa [%sp + STACK_BIAS + 0x78] %asi, %i7; \
572 ba,pt %xcc, user_rtt_pre_restore; \
573 restored; \
574 nop; nop; nop; nop; nop; nop; \
575 nop; nop; nop; nop; nop; \
576 ba,a,pt %xcc, user_rtt_fill_fixup; \
577 ba,a,pt %xcc, user_rtt_fill_fixup; \
578 ba,a,pt %xcc, user_rtt_fill_fixup;
579
580
359/* Normal 32bit fill */ 581/* Normal 32bit fill */
360#define FILL_2_GENERIC(ASI) \ 582#define FILL_2_GENERIC(ASI) \
361 srl %sp, 0, %sp; \ 583 srl %sp, 0, %sp; \
@@ -387,6 +609,34 @@
387 b,a,pt %xcc, fill_fixup_mna; \ 609 b,a,pt %xcc, fill_fixup_mna; \
388 b,a,pt %xcc, fill_fixup; 610 b,a,pt %xcc, fill_fixup;
389 611
612#define FILL_2_GENERIC_RTRAP \
613user_rtt_fill_32bit: \
614 srl %sp, 0, %sp; \
615 lduwa [%sp + 0x00] %asi, %l0; \
616 lduwa [%sp + 0x04] %asi, %l1; \
617 lduwa [%sp + 0x08] %asi, %l2; \
618 lduwa [%sp + 0x0c] %asi, %l3; \
619 lduwa [%sp + 0x10] %asi, %l4; \
620 lduwa [%sp + 0x14] %asi, %l5; \
621 lduwa [%sp + 0x18] %asi, %l6; \
622 lduwa [%sp + 0x1c] %asi, %l7; \
623 lduwa [%sp + 0x20] %asi, %i0; \
624 lduwa [%sp + 0x24] %asi, %i1; \
625 lduwa [%sp + 0x28] %asi, %i2; \
626 lduwa [%sp + 0x2c] %asi, %i3; \
627 lduwa [%sp + 0x30] %asi, %i4; \
628 lduwa [%sp + 0x34] %asi, %i5; \
629 lduwa [%sp + 0x38] %asi, %i6; \
630 lduwa [%sp + 0x3c] %asi, %i7; \
631 ba,pt %xcc, user_rtt_pre_restore; \
632 restored; \
633 nop; nop; nop; nop; nop; \
634 nop; nop; nop; nop; nop; \
635 ba,a,pt %xcc, user_rtt_fill_fixup; \
636 ba,a,pt %xcc, user_rtt_fill_fixup; \
637 ba,a,pt %xcc, user_rtt_fill_fixup;
638
639
390#define FILL_1_NORMAL FILL_1_GENERIC(ASI_AIUP) 640#define FILL_1_NORMAL FILL_1_GENERIC(ASI_AIUP)
391#define FILL_2_NORMAL FILL_2_GENERIC(ASI_AIUP) 641#define FILL_2_NORMAL FILL_2_GENERIC(ASI_AIUP)
392#define FILL_3_NORMAL FILL_0_NORMAL 642#define FILL_3_NORMAL FILL_0_NORMAL
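
The etrap spill fixup macros earlier in this hunk fall back to parking the register window in thread_info when the user stack cannot be written, so it can be flushed later. A rough C rendering of that bookkeeping, assuming the conventional sparc64 thread_info fields (wsaved, rwbuf_stkptrs[], reg_window[]) behind the TI_* offsets; illustrative only, not part of the patch:

struct example_reg_window32 {
	unsigned int locals[8];		/* %l0-%l7, as stored by the stwa sequence */
	unsigned int ins[8];		/* %i0-%i7 */
};

static void example_record_spill_32bit(struct thread_info *t,
				       unsigned long user_sp,
				       const struct example_reg_window32 *win)
{
	int slot = t->wsaved;				/* TI_WSAVED */

	t->rwbuf_stkptrs[slot] = user_sp;		/* TI_RWIN_SPTRS + slot * 8 */
	memcpy(&t->reg_window[slot], win, sizeof(*win));/* TI_REG_WINDOW + slot * 128 */
	t->wsaved = slot + 1;				/* flushed before returning to user */
}
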
diff --git a/include/asm-sparc64/uaccess.h b/include/asm-sparc64/uaccess.h
index c91d1e38eac6..afe236ba555b 100644
--- a/include/asm-sparc64/uaccess.h
+++ b/include/asm-sparc64/uaccess.h
@@ -114,16 +114,6 @@ case 8: __put_user_asm(data,x,addr,__pu_ret); break; \
114default: __pu_ret = __put_user_bad(); break; \ 114default: __pu_ret = __put_user_bad(); break; \
115} __pu_ret; }) 115} __pu_ret; })
116 116
117#define __put_user_nocheck_ret(data,addr,size,retval) ({ \
118register int __foo __asm__ ("l1"); \
119switch (size) { \
120case 1: __put_user_asm_ret(data,b,addr,retval,__foo); break; \
121case 2: __put_user_asm_ret(data,h,addr,retval,__foo); break; \
122case 4: __put_user_asm_ret(data,w,addr,retval,__foo); break; \
123case 8: __put_user_asm_ret(data,x,addr,retval,__foo); break; \
124default: if (__put_user_bad()) return retval; break; \
125} })
126
127#define __put_user_asm(x,size,addr,ret) \ 117#define __put_user_asm(x,size,addr,ret) \
128__asm__ __volatile__( \ 118__asm__ __volatile__( \
129 "/* Put user asm, inline. */\n" \ 119 "/* Put user asm, inline. */\n" \
@@ -143,33 +133,6 @@ __asm__ __volatile__( \
143 : "=r" (ret) : "r" (x), "r" (__m(addr)), \ 133 : "=r" (ret) : "r" (x), "r" (__m(addr)), \
144 "i" (-EFAULT)) 134 "i" (-EFAULT))
145 135
146#define __put_user_asm_ret(x,size,addr,ret,foo) \
147if (__builtin_constant_p(ret) && ret == -EFAULT) \
148__asm__ __volatile__( \
149 "/* Put user asm ret, inline. */\n" \
150"1:\t" "st"#size "a %1, [%2] %%asi\n\n\t" \
151 ".section __ex_table,\"a\"\n\t" \
152 ".align 4\n\t" \
153 ".word 1b, __ret_efault\n\n\t" \
154 ".previous\n\n\t" \
155 : "=r" (foo) : "r" (x), "r" (__m(addr))); \
156else \
157__asm__ __volatile__( \
158 "/* Put user asm ret, inline. */\n" \
159"1:\t" "st"#size "a %1, [%2] %%asi\n\n\t" \
160 ".section .fixup,#alloc,#execinstr\n\t" \
161 ".align 4\n" \
162"3:\n\t" \
163 "ret\n\t" \
164 " restore %%g0, %3, %%o0\n\n\t" \
165 ".previous\n\t" \
166 ".section __ex_table,\"a\"\n\t" \
167 ".align 4\n\t" \
168 ".word 1b, 3b\n\n\t" \
169 ".previous\n\n\t" \
170 : "=r" (foo) : "r" (x), "r" (__m(addr)), \
171 "i" (ret))
172
173extern int __put_user_bad(void); 136extern int __put_user_bad(void);
174 137
175#define __get_user_nocheck(data,addr,size,type) ({ \ 138#define __get_user_nocheck(data,addr,size,type) ({ \
@@ -289,14 +252,7 @@ copy_in_user(void __user *to, void __user *from, unsigned long size)
289} 252}
290#define __copy_in_user copy_in_user 253#define __copy_in_user copy_in_user
291 254
292extern unsigned long __must_check __bzero_noasi(void __user *, unsigned long); 255extern unsigned long __must_check __clear_user(void __user *, unsigned long);
293
294static inline unsigned long __must_check
295__clear_user(void __user *addr, unsigned long size)
296{
297
298 return __bzero_noasi(addr, size);
299}
300 256
301#define clear_user __clear_user 257#define clear_user __clear_user
302 258
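
With __bzero_noasi gone, __clear_user is a plain extern and clear_user maps straight onto it. A minimal caller-side sketch, assuming the usual semantics (the return value is the number of bytes that could not be zeroed); not part of the patch:

static int example_zero_user_range(void __user *buf, unsigned long len)
{
	if (clear_user(buf, len))
		return -EFAULT;		/* part of the range was not writable */
	return 0;
}
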
diff --git a/include/asm-sparc64/vdev.h b/include/asm-sparc64/vdev.h
new file mode 100644
index 000000000000..996e6be7b976
--- /dev/null
+++ b/include/asm-sparc64/vdev.h
@@ -0,0 +1,16 @@
1/* vdev.h: SUN4V virtual device interfaces and defines.
2 *
3 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
4 */
5
6#ifndef _SPARC64_VDEV_H
7#define _SPARC64_VDEV_H
8
9#include <linux/types.h>
10
11extern u32 sun4v_vdev_devhandle;
12extern int sun4v_vdev_root;
13
14extern unsigned int sun4v_vdev_device_interrupt(unsigned int);
15
16#endif /* !(_SPARC64_VDEV_H) */
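
A hedged sketch of how a sun4v virtual-device driver might consume these exports; the meaning of the argument (a device node under sun4v_vdev_root) is assumed for illustration and the function name is invented:

#include <asm/vdev.h>

static unsigned int example_vdev_irq(unsigned int dev_node)
{
	/* devhandle names the virtual-devices bus when talking to the hypervisor */
	printk(KERN_DEBUG "vdev: devhandle %x root %d\n",
	       sun4v_vdev_devhandle, sun4v_vdev_root);

	/* translate the device's interrupt property into a system interrupt */
	return sun4v_vdev_device_interrupt(dev_node);
}
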
diff --git a/include/asm-sparc64/xor.h b/include/asm-sparc64/xor.h
index 8b3a7e4b6062..8ce3f1813e28 100644
--- a/include/asm-sparc64/xor.h
+++ b/include/asm-sparc64/xor.h
@@ -2,9 +2,11 @@
2 * include/asm-sparc64/xor.h 2 * include/asm-sparc64/xor.h
3 * 3 *
4 * High speed xor_block operation for RAID4/5 utilizing the 4 * High speed xor_block operation for RAID4/5 utilizing the
5 * UltraSparc Visual Instruction Set. 5 * UltraSparc Visual Instruction Set and Niagara block-init
6 * twin-load instructions.
6 * 7 *
7 * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz) 8 * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
9 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
8 * 10 *
9 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by 12 * it under the terms of the GNU General Public License as published by
@@ -16,8 +18,7 @@
16 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */ 19 */
18 20
19#include <asm/pstate.h> 21#include <asm/spitfire.h>
20#include <asm/asi.h>
21 22
22extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *); 23extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
23extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *, 24extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
@@ -37,4 +38,29 @@ static struct xor_block_template xor_block_VIS = {
37 .do_5 = xor_vis_5, 38 .do_5 = xor_vis_5,
38}; 39};
39 40
40#define XOR_TRY_TEMPLATES xor_speed(&xor_block_VIS) 41extern void xor_niagara_2(unsigned long, unsigned long *, unsigned long *);
42extern void xor_niagara_3(unsigned long, unsigned long *, unsigned long *,
43 unsigned long *);
44extern void xor_niagara_4(unsigned long, unsigned long *, unsigned long *,
45 unsigned long *, unsigned long *);
46extern void xor_niagara_5(unsigned long, unsigned long *, unsigned long *,
47 unsigned long *, unsigned long *, unsigned long *);
48
49static struct xor_block_template xor_block_niagara = {
50 .name = "Niagara",
51 .do_2 = xor_niagara_2,
52 .do_3 = xor_niagara_3,
53 .do_4 = xor_niagara_4,
54 .do_5 = xor_niagara_5,
55};
56
57#undef XOR_TRY_TEMPLATES
58#define XOR_TRY_TEMPLATES \
59 do { \
60 xor_speed(&xor_block_VIS); \
61 xor_speed(&xor_block_niagara); \
62 } while (0)
63
64/* Use VIS for everything except Niagara. */
65#define XOR_SELECT_TEMPLATE(FASTEST) \
66 (tlb_type == hypervisor ? &xor_block_niagara : &xor_block_VIS)
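
A short sketch of how the template machinery above is used, assuming the generic struct xor_block_template from include/linux/raid/xor.h: XOR_TRY_TEMPLATES benchmarks both implementations, then XOR_SELECT_TEMPLATE forces the Niagara version whenever tlb_type == hypervisor. The helper name is illustrative:

static void example_xor_three(unsigned long bytes, unsigned long *dst,
			      unsigned long *src1, unsigned long *src2)
{
	/* On sun4v (tlb_type == hypervisor) the block-init loads win outright;
	 * everything else keeps the VIS implementation. */
	struct xor_block_template *t = XOR_SELECT_TEMPLATE(&xor_block_VIS);

	t->do_3(bytes, dst, src1, src2);	/* dst ^= src1 ^ src2 over 'bytes' bytes */
}
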
diff --git a/include/linux/amba/clcd.h b/include/linux/amba/clcd.h
index 6b8d73dc1ab0..9cf64b1b688b 100644
--- a/include/linux/amba/clcd.h
+++ b/include/linux/amba/clcd.h
@@ -54,6 +54,7 @@
54#define CNTL_LCDBPP4 (2 << 1) 54#define CNTL_LCDBPP4 (2 << 1)
55#define CNTL_LCDBPP8 (3 << 1) 55#define CNTL_LCDBPP8 (3 << 1)
56#define CNTL_LCDBPP16 (4 << 1) 56#define CNTL_LCDBPP16 (4 << 1)
57#define CNTL_LCDBPP16_565 (6 << 1)
57#define CNTL_LCDBPP24 (5 << 1) 58#define CNTL_LCDBPP24 (5 << 1)
58#define CNTL_LCDBW (1 << 4) 59#define CNTL_LCDBW (1 << 4)
59#define CNTL_LCDTFT (1 << 5) 60#define CNTL_LCDTFT (1 << 5)
@@ -209,7 +210,16 @@ static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs)
209 val |= CNTL_LCDBPP8; 210 val |= CNTL_LCDBPP8;
210 break; 211 break;
211 case 16: 212 case 16:
212 val |= CNTL_LCDBPP16; 213 /*
214 * PL110 cannot choose between 5551 and 565 modes in
215 * its control register
216 */
217 if ((fb->dev->periphid & 0x000fffff) == 0x00041110)
218 val |= CNTL_LCDBPP16;
219 else if (fb->fb.var.green.length == 5)
220 val |= CNTL_LCDBPP16;
221 else
222 val |= CNTL_LCDBPP16_565;
213 break; 223 break;
214 case 32: 224 case 32:
215 val |= CNTL_LCDBPP24; 225 val |= CNTL_LCDBPP24;
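
The 16bpp case above now distinguishes the controller from the requested pixel layout. A hedged restatement of that decision, assuming periphid 0x00041110 identifies a PL110 (which only has the one 16bpp layout); names are illustrative:

static u32 example_cntl_for_16bpp(u32 periphid, unsigned int green_length)
{
	if ((periphid & 0x000fffff) == 0x00041110)
		return CNTL_LCDBPP16;		/* PL110: single 16bpp mode */
	if (green_length == 5)
		return CNTL_LCDBPP16;		/* RGB555 requested */
	return CNTL_LCDBPP16_565;		/* RGB565 on later controllers */
}
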
diff --git a/include/linux/arcdevice.h b/include/linux/arcdevice.h
index 7198f129e135..231ba090ae34 100644
--- a/include/linux/arcdevice.h
+++ b/include/linux/arcdevice.h
@@ -206,7 +206,6 @@ struct ArcProto {
206 206
207extern struct ArcProto *arc_proto_map[256], *arc_proto_default, 207extern struct ArcProto *arc_proto_map[256], *arc_proto_default,
208 *arc_bcast_proto, *arc_raw_proto; 208 *arc_bcast_proto, *arc_raw_proto;
209extern struct ArcProto arc_proto_null;
210 209
211 210
212/* 211/*
@@ -334,17 +333,9 @@ void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc);
334#define arcnet_dump_skb(dev,skb,desc) ; 333#define arcnet_dump_skb(dev,skb,desc) ;
335#endif 334#endif
336 335
337#if (ARCNET_DEBUG_MAX & D_RX) || (ARCNET_DEBUG_MAX & D_TX)
338void arcnet_dump_packet(struct net_device *dev, int bufnum, char *desc,
339 int take_arcnet_lock);
340#else
341#define arcnet_dump_packet(dev, bufnum, desc,take_arcnet_lock) ;
342#endif
343
344void arcnet_unregister_proto(struct ArcProto *proto); 336void arcnet_unregister_proto(struct ArcProto *proto);
345irqreturn_t arcnet_interrupt(int irq, void *dev_id, struct pt_regs *regs); 337irqreturn_t arcnet_interrupt(int irq, void *dev_id, struct pt_regs *regs);
346struct net_device *alloc_arcdev(char *name); 338struct net_device *alloc_arcdev(char *name);
347void arcnet_rx(struct net_device *dev, int bufnum);
348 339
349#endif /* __KERNEL__ */ 340#endif /* __KERNEL__ */
350#endif /* _LINUX_ARCDEVICE_H */ 341#endif /* _LINUX_ARCDEVICE_H */
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 94f77cce27fa..b02a16c435e7 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -267,6 +267,16 @@ struct ata_taskfile {
267 ((u64) (id)[(n) + 1] << 16) | \ 267 ((u64) (id)[(n) + 1] << 16) | \
268 ((u64) (id)[(n) + 0]) ) 268 ((u64) (id)[(n) + 0]) )
269 269
270static inline unsigned int ata_id_major_version(const u16 *id)
271{
272 unsigned int mver;
273
274 for (mver = 14; mver >= 1; mver--)
275 if (id[ATA_ID_MAJOR_VER] & (1 << mver))
276 break;
277 return mver;
278}
279
270static inline int ata_id_current_chs_valid(const u16 *id) 280static inline int ata_id_current_chs_valid(const u16 *id)
271{ 281{
272 /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command 282 /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command
@@ -302,4 +312,16 @@ static inline int ata_ok(u8 status)
302 == ATA_DRDY); 312 == ATA_DRDY);
303} 313}
304 314
315static inline int lba_28_ok(u64 block, u32 n_block)
316{
317 /* check the ending block number */
318 return ((block + n_block - 1) < ((u64)1 << 28)) && (n_block <= 256);
319}
320
321static inline int lba_48_ok(u64 block, u32 n_block)
322{
323 /* check the ending block number */
324 return ((block + n_block - 1) < ((u64)1 << 48)) && (n_block <= 65536);
325}
326
305#endif /* __LINUX_ATA_H__ */ 327#endif /* __LINUX_ATA_H__ */
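
A small usage sketch for the helpers added above, deciding how a request must be addressed; the values and helper name are illustrative:

/* 0: fits LBA28, 1: needs LBA48, -1: beyond 48-bit addressing or too long */
static int example_addressing_needed(u64 block, u32 n_block)
{
	if (lba_28_ok(block, n_block))
		return 0;	/* end block below 1 << 28 and at most 256 sectors */
	if (lba_48_ok(block, n_block))
		return 1;	/* end block below 1 << 48 and at most 65536 sectors */
	return -1;
}
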
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 860e7a485a5f..56bb6a4e15f3 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -58,7 +58,7 @@ struct cfq_io_context {
58 * circular list of cfq_io_contexts belonging to a process io context 58 * circular list of cfq_io_contexts belonging to a process io context
59 */ 59 */
60 struct list_head list; 60 struct list_head list;
61 struct cfq_queue *cfqq; 61 struct cfq_queue *cfqq[2];
62 void *key; 62 void *key;
63 63
64 struct io_context *ioc; 64 struct io_context *ioc;
@@ -69,6 +69,8 @@ struct cfq_io_context {
69 unsigned long ttime_samples; 69 unsigned long ttime_samples;
70 unsigned long ttime_mean; 70 unsigned long ttime_mean;
71 71
72 struct list_head queue_list;
73
72 void (*dtor)(struct cfq_io_context *); 74 void (*dtor)(struct cfq_io_context *);
73 void (*exit)(struct cfq_io_context *); 75 void (*exit)(struct cfq_io_context *);
74}; 76};
@@ -404,8 +406,6 @@ struct request_queue
404 406
405 struct blk_queue_tag *queue_tags; 407 struct blk_queue_tag *queue_tags;
406 408
407 atomic_t refcnt;
408
409 unsigned int nr_sorted; 409 unsigned int nr_sorted;
410 unsigned int in_flight; 410 unsigned int in_flight;
411 411
@@ -424,6 +424,8 @@ struct request_queue
424 struct request pre_flush_rq, bar_rq, post_flush_rq; 424 struct request pre_flush_rq, bar_rq, post_flush_rq;
425 struct request *orig_bar_rq; 425 struct request *orig_bar_rq;
426 unsigned int bi_size; 426 unsigned int bi_size;
427
428 struct mutex sysfs_lock;
427}; 429};
428 430
429#define RQ_INACTIVE (-1) 431#define RQ_INACTIVE (-1)
@@ -725,7 +727,7 @@ extern long nr_blockdev_pages(void);
725int blk_get_queue(request_queue_t *); 727int blk_get_queue(request_queue_t *);
726request_queue_t *blk_alloc_queue(gfp_t); 728request_queue_t *blk_alloc_queue(gfp_t);
727request_queue_t *blk_alloc_queue_node(gfp_t, int); 729request_queue_t *blk_alloc_queue_node(gfp_t, int);
728#define blk_put_queue(q) blk_cleanup_queue((q)) 730extern void blk_put_queue(request_queue_t *);
729 731
730/* 732/*
731 * tag stuff 733 * tag stuff
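
blk_put_queue() is now a real reference drop, pairing with blk_get_queue(), instead of an alias for blk_cleanup_queue(). A hedged sketch of the intended pairing; the helper name is illustrative:

static request_queue_t *example_queue_get(request_queue_t *q)
{
	if (blk_get_queue(q))	/* non-zero: queue already marked dead */
		return NULL;
	return q;		/* caller must balance with blk_put_queue(q) */
}
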
diff --git a/include/linux/dvb/audio.h b/include/linux/dvb/audio.h
index 2b8797084685..0874a67c6b92 100644
--- a/include/linux/dvb/audio.h
+++ b/include/linux/dvb/audio.h
@@ -121,4 +121,17 @@ typedef uint16_t audio_attributes_t;
121#define AUDIO_SET_ATTRIBUTES _IOW('o', 17, audio_attributes_t) 121#define AUDIO_SET_ATTRIBUTES _IOW('o', 17, audio_attributes_t)
122#define AUDIO_SET_KARAOKE _IOW('o', 18, audio_karaoke_t) 122#define AUDIO_SET_KARAOKE _IOW('o', 18, audio_karaoke_t)
123 123
124/**
125 * AUDIO_GET_PTS
126 *
127 * Read the 33 bit presentation time stamp as defined
128 * in ITU T-REC-H.222.0 / ISO/IEC 13818-1.
129 *
130 * The PTS should belong to the currently played
131 * frame if possible, but may also be a value close to it
132 * like the PTS of the last decoded frame or the last PTS
133 * extracted by the PES parser.
134 */
135#define AUDIO_GET_PTS _IOR('o', 19, __u64)
136
124#endif /* _DVBAUDIO_H_ */ 137#endif /* _DVBAUDIO_H_ */
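
A minimal userspace sketch for the new ioctl (the VIDEO_GET_PTS ioctl added below works the same way); the device path is an example only:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/dvb/audio.h>

int main(void)
{
	__u64 pts;
	int fd = open("/dev/dvb/adapter0/audio0", O_RDONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, AUDIO_GET_PTS, &pts) == 0)
		printf("audio PTS: %llu (90 kHz units)\n", (unsigned long long)pts);
	close(fd);
	return 0;
}
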
diff --git a/include/linux/dvb/video.h b/include/linux/dvb/video.h
index b81e58b2ebf8..faebfda397ff 100644
--- a/include/linux/dvb/video.h
+++ b/include/linux/dvb/video.h
@@ -200,4 +200,17 @@ typedef uint16_t video_attributes_t;
200#define VIDEO_GET_SIZE _IOR('o', 55, video_size_t) 200#define VIDEO_GET_SIZE _IOR('o', 55, video_size_t)
201#define VIDEO_GET_FRAME_RATE _IOR('o', 56, unsigned int) 201#define VIDEO_GET_FRAME_RATE _IOR('o', 56, unsigned int)
202 202
203/**
204 * VIDEO_GET_PTS
205 *
206 * Read the 33 bit presentation time stamp as defined
207 * in ITU T-REC-H.222.0 / ISO/IEC 13818-1.
208 *
209 * The PTS should belong to the currently played
210 * frame if possible, but may also be a value close to it
211 * like the PTS of the last decoded frame or the last PTS
212 * extracted by the PES parser.
213 */
214#define VIDEO_GET_PTS _IOR('o', 57, __u64)
215
203#endif /*_DVBVIDEO_H_*/ 216#endif /*_DVBVIDEO_H_*/
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 18cf1f3e1184..ad133fcfb239 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -48,10 +48,17 @@ struct elevator_ops
48 48
49 elevator_init_fn *elevator_init_fn; 49 elevator_init_fn *elevator_init_fn;
50 elevator_exit_fn *elevator_exit_fn; 50 elevator_exit_fn *elevator_exit_fn;
51 void (*trim)(struct io_context *);
51}; 52};
52 53
53#define ELV_NAME_MAX (16) 54#define ELV_NAME_MAX (16)
54 55
56struct elv_fs_entry {
57 struct attribute attr;
58 ssize_t (*show)(elevator_t *, char *);
59 ssize_t (*store)(elevator_t *, const char *, size_t);
60};
61
55/* 62/*
56 * identifies an elevator type, such as AS or deadline 63 * identifies an elevator type, such as AS or deadline
57 */ 64 */
@@ -60,7 +67,7 @@ struct elevator_type
60 struct list_head list; 67 struct list_head list;
61 struct elevator_ops ops; 68 struct elevator_ops ops;
62 struct elevator_type *elevator_type; 69 struct elevator_type *elevator_type;
63 struct kobj_type *elevator_ktype; 70 struct elv_fs_entry *elevator_attrs;
64 char elevator_name[ELV_NAME_MAX]; 71 char elevator_name[ELV_NAME_MAX];
65 struct module *elevator_owner; 72 struct module *elevator_owner;
66}; 73};
@@ -74,6 +81,7 @@ struct elevator_queue
74 void *elevator_data; 81 void *elevator_data;
75 struct kobject kobj; 82 struct kobject kobj;
76 struct elevator_type *elevator_type; 83 struct elevator_type *elevator_type;
84 struct mutex sysfs_lock;
77}; 85};
78 86
79/* 87/*
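
With elevator_ktype replaced by an elv_fs_entry table, each I/O scheduler exports its tunables as show/store pairs. A hedged sketch of such a table using the generic __ATTR/__ATTR_NULL sysfs initializers; all names are invented:

static ssize_t example_show(elevator_t *e, char *page)
{
	return sprintf(page, "%d\n", 0);	/* would report state from e->elevator_data */
}

static ssize_t example_store(elevator_t *e, const char *page, size_t count)
{
	/* parse 'page', update the scheduler's private data, return bytes consumed */
	return count;
}

static struct elv_fs_entry example_attrs[] = {
	__ATTR(example_tunable, S_IRUGO | S_IWUSR, example_show, example_store),
	__ATTR_NULL
};
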
diff --git a/include/linux/if.h b/include/linux/if.h
index ce627d9092ef..12c6f6d157c3 100644
--- a/include/linux/if.h
+++ b/include/linux/if.h
@@ -52,6 +52,9 @@
52/* Private (from user) interface flags (netdevice->priv_flags). */ 52/* Private (from user) interface flags (netdevice->priv_flags). */
53#define IFF_802_1Q_VLAN 0x1 /* 802.1Q VLAN device. */ 53#define IFF_802_1Q_VLAN 0x1 /* 802.1Q VLAN device. */
54#define IFF_EBRIDGE 0x2 /* Ethernet bridging device. */ 54#define IFF_EBRIDGE 0x2 /* Ethernet bridging device. */
55#define IFF_SLAVE_INACTIVE 0x4 /* bonding slave not the curr. active */
56#define IFF_MASTER_8023AD 0x8 /* bonding master, 802.3ad. */
57#define IFF_MASTER_ALB 0x10 /* bonding master, balance-alb. */
55 58
56#define IF_GET_IFACE 0x0001 /* for querying only */ 59#define IF_GET_IFACE 0x0001 /* for querying only */
57#define IF_GET_PROTO 0x0002 60#define IF_GET_PROTO 0x0002
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index 7a92c1ce1457..ab08f35cbc35 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -61,6 +61,7 @@
61#define ETH_P_8021Q 0x8100 /* 802.1Q VLAN Extended Header */ 61#define ETH_P_8021Q 0x8100 /* 802.1Q VLAN Extended Header */
62#define ETH_P_IPX 0x8137 /* IPX over DIX */ 62#define ETH_P_IPX 0x8137 /* IPX over DIX */
63#define ETH_P_IPV6 0x86DD /* IPv6 over bluebook */ 63#define ETH_P_IPV6 0x86DD /* IPv6 over bluebook */
64#define ETH_P_SLOW 0x8809 /* Slow Protocol. See 802.3ad 43B */
64#define ETH_P_WCCP 0x883E /* Web-cache coordination protocol 65#define ETH_P_WCCP 0x883E /* Web-cache coordination protocol
65 * defined in draft-wilson-wrec-wccp-v2-00.txt */ 66 * defined in draft-wilson-wrec-wccp-v2-00.txt */
66#define ETH_P_PPP_DISC 0x8863 /* PPPoE discovery messages */ 67#define ETH_P_PPP_DISC 0x8863 /* PPPoE discovery messages */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index c91be5e64ede..239408ecfddf 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -35,7 +35,8 @@
35#include <linux/workqueue.h> 35#include <linux/workqueue.h>
36 36
37/* 37/*
38 * compile-time options 38 * compile-time options: to be removed as soon as all the drivers are
39 * converted to the new debugging mechanism
39 */ 40 */
40#undef ATA_DEBUG /* debugging output */ 41#undef ATA_DEBUG /* debugging output */
41#undef ATA_VERBOSE_DEBUG /* yet more debugging output */ 42#undef ATA_VERBOSE_DEBUG /* yet more debugging output */
@@ -61,15 +62,37 @@
61 62
62#define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) 63#define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
63 64
64#ifdef ATA_NDEBUG 65/* NEW: debug levels */
65#define assert(expr) 66#define HAVE_LIBATA_MSG 1
66#else 67
67#define assert(expr) \ 68enum {
68 if(unlikely(!(expr))) { \ 69 ATA_MSG_DRV = 0x0001,
69 printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \ 70 ATA_MSG_INFO = 0x0002,
70 #expr,__FILE__,__FUNCTION__,__LINE__); \ 71 ATA_MSG_PROBE = 0x0004,
71 } 72 ATA_MSG_WARN = 0x0008,
72#endif 73 ATA_MSG_MALLOC = 0x0010,
74 ATA_MSG_CTL = 0x0020,
75 ATA_MSG_INTR = 0x0040,
76 ATA_MSG_ERR = 0x0080,
77};
78
79#define ata_msg_drv(p) ((p)->msg_enable & ATA_MSG_DRV)
80#define ata_msg_info(p) ((p)->msg_enable & ATA_MSG_INFO)
81#define ata_msg_probe(p) ((p)->msg_enable & ATA_MSG_PROBE)
82#define ata_msg_warn(p) ((p)->msg_enable & ATA_MSG_WARN)
83#define ata_msg_malloc(p) ((p)->msg_enable & ATA_MSG_MALLOC)
84#define ata_msg_ctl(p) ((p)->msg_enable & ATA_MSG_CTL)
85#define ata_msg_intr(p) ((p)->msg_enable & ATA_MSG_INTR)
86#define ata_msg_err(p) ((p)->msg_enable & ATA_MSG_ERR)
87
88static inline u32 ata_msg_init(int dval, int default_msg_enable_bits)
89{
90 if (dval < 0 || dval >= (sizeof(u32) * 8))
91 return default_msg_enable_bits; /* should be 0x1 - only driver info msgs */
92 if (!dval)
93 return 0;
94 return (1 << dval) - 1;
95}
73 96
74/* defines only for the constants which don't work well as enums */ 97/* defines only for the constants which don't work well as enums */
75#define ATA_TAG_POISON 0xfafbfcfdU 98#define ATA_TAG_POISON 0xfafbfcfdU
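
A hedged sketch of how a converted driver is expected to use the new message levels: ata_msg_init() turns a module parameter into a bitmap stored in the port's msg_enable field (added further down in this patch), and the ata_msg_*() macros gate the printks. The parameter name is illustrative:

static int example_msg_level = 3;	/* would normally be a module parameter */

static void example_setup_messages(struct ata_port *ap)
{
	/* 3 -> (1 << 3) - 1 = DRV | INFO | PROBE; out-of-range falls back to the default */
	ap->msg_enable = ata_msg_init(example_msg_level,
				      ATA_MSG_DRV | ATA_MSG_INFO);

	if (ata_msg_probe(ap))
		printk(KERN_INFO "probe messages enabled on this port\n");
}
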
@@ -99,8 +122,7 @@ enum {
99 /* struct ata_device stuff */ 122 /* struct ata_device stuff */
100 ATA_DFLAG_LBA48 = (1 << 0), /* device supports LBA48 */ 123 ATA_DFLAG_LBA48 = (1 << 0), /* device supports LBA48 */
101 ATA_DFLAG_PIO = (1 << 1), /* device currently in PIO mode */ 124 ATA_DFLAG_PIO = (1 << 1), /* device currently in PIO mode */
102 ATA_DFLAG_LOCK_SECTORS = (1 << 2), /* don't adjust max_sectors */ 125 ATA_DFLAG_LBA = (1 << 2), /* device supports LBA */
103 ATA_DFLAG_LBA = (1 << 3), /* device supports LBA */
104 126
105 ATA_DEV_UNKNOWN = 0, /* unknown device */ 127 ATA_DEV_UNKNOWN = 0, /* unknown device */
106 ATA_DEV_ATA = 1, /* ATA device */ 128 ATA_DEV_ATA = 1, /* ATA device */
@@ -115,9 +137,9 @@ enum {
115 ATA_FLAG_PORT_DISABLED = (1 << 2), /* port is disabled, ignore it */ 137 ATA_FLAG_PORT_DISABLED = (1 << 2), /* port is disabled, ignore it */
116 ATA_FLAG_SATA = (1 << 3), 138 ATA_FLAG_SATA = (1 << 3),
117 ATA_FLAG_NO_LEGACY = (1 << 4), /* no legacy mode check */ 139 ATA_FLAG_NO_LEGACY = (1 << 4), /* no legacy mode check */
118 ATA_FLAG_SRST = (1 << 5), /* use ATA SRST, not E.D.D. */ 140 ATA_FLAG_SRST = (1 << 5), /* (obsolete) use ATA SRST, not E.D.D. */
119 ATA_FLAG_MMIO = (1 << 6), /* use MMIO, not PIO */ 141 ATA_FLAG_MMIO = (1 << 6), /* use MMIO, not PIO */
120 ATA_FLAG_SATA_RESET = (1 << 7), /* use COMRESET */ 142 ATA_FLAG_SATA_RESET = (1 << 7), /* (obsolete) use COMRESET */
121 ATA_FLAG_PIO_DMA = (1 << 8), /* PIO cmds via DMA */ 143 ATA_FLAG_PIO_DMA = (1 << 8), /* PIO cmds via DMA */
122 ATA_FLAG_NOINTR = (1 << 9), /* FIXME: Remove this once 144 ATA_FLAG_NOINTR = (1 << 9), /* FIXME: Remove this once
123 * proper HSM is in place. */ 145 * proper HSM is in place. */
@@ -129,10 +151,14 @@ enum {
129 ATA_FLAG_PIO_LBA48 = (1 << 13), /* Host DMA engine is LBA28 only */ 151 ATA_FLAG_PIO_LBA48 = (1 << 13), /* Host DMA engine is LBA28 only */
130 ATA_FLAG_IRQ_MASK = (1 << 14), /* Mask IRQ in PIO xfers */ 152 ATA_FLAG_IRQ_MASK = (1 << 14), /* Mask IRQ in PIO xfers */
131 153
154 ATA_FLAG_FLUSH_PORT_TASK = (1 << 15), /* Flush port task */
155 ATA_FLAG_IN_EH = (1 << 16), /* EH in progress */
156
132 ATA_QCFLAG_ACTIVE = (1 << 1), /* cmd not yet ack'd to scsi layer */ 157 ATA_QCFLAG_ACTIVE = (1 << 1), /* cmd not yet ack'd to scsi layer */
133 ATA_QCFLAG_SG = (1 << 3), /* have s/g table? */ 158 ATA_QCFLAG_SG = (1 << 3), /* have s/g table? */
134 ATA_QCFLAG_SINGLE = (1 << 4), /* no s/g, just a single buffer */ 159 ATA_QCFLAG_SINGLE = (1 << 4), /* no s/g, just a single buffer */
135 ATA_QCFLAG_DMAMAP = ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE, 160 ATA_QCFLAG_DMAMAP = ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE,
161 ATA_QCFLAG_EH_SCHEDULED = (1 << 5), /* EH scheduled */
136 162
137 /* various lengths of time */ 163 /* various lengths of time */
138 ATA_TMOUT_EDD = 5 * HZ, /* heuristic */ 164 ATA_TMOUT_EDD = 5 * HZ, /* heuristic */
@@ -162,11 +188,19 @@ enum {
162 PORT_DISABLED = 2, 188 PORT_DISABLED = 2,
163 189
164 /* encoding various smaller bitmaps into a single 190 /* encoding various smaller bitmaps into a single
165 * unsigned long bitmap 191 * unsigned int bitmap
166 */ 192 */
167 ATA_SHIFT_UDMA = 0, 193 ATA_BITS_PIO = 5,
168 ATA_SHIFT_MWDMA = 8, 194 ATA_BITS_MWDMA = 3,
169 ATA_SHIFT_PIO = 11, 195 ATA_BITS_UDMA = 8,
196
197 ATA_SHIFT_PIO = 0,
198 ATA_SHIFT_MWDMA = ATA_SHIFT_PIO + ATA_BITS_PIO,
199 ATA_SHIFT_UDMA = ATA_SHIFT_MWDMA + ATA_BITS_MWDMA,
200
201 ATA_MASK_PIO = ((1 << ATA_BITS_PIO) - 1) << ATA_SHIFT_PIO,
202 ATA_MASK_MWDMA = ((1 << ATA_BITS_MWDMA) - 1) << ATA_SHIFT_MWDMA,
203 ATA_MASK_UDMA = ((1 << ATA_BITS_UDMA) - 1) << ATA_SHIFT_UDMA,
170 204
171 /* size of buffer to pad xfers ending on unaligned boundaries */ 205 /* size of buffer to pad xfers ending on unaligned boundaries */
172 ATA_DMA_PAD_SZ = 4, 206 ATA_DMA_PAD_SZ = 4,
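
The PIO/MWDMA/UDMA masks now pack into a single unsigned int using the widths and shifts above. A hedged helper showing the packing; the function name is illustrative:

static unsigned int example_pack_xfermask(unsigned int pio_mask,
					  unsigned int mwdma_mask,
					  unsigned int udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
	       ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
	       ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}
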
@@ -189,10 +223,15 @@ enum hsm_task_states {
189}; 223};
190 224
191enum ata_completion_errors { 225enum ata_completion_errors {
192 AC_ERR_OTHER = (1 << 0), 226 AC_ERR_DEV = (1 << 0), /* device reported error */
193 AC_ERR_DEV = (1 << 1), 227 AC_ERR_HSM = (1 << 1), /* host state machine violation */
194 AC_ERR_ATA_BUS = (1 << 2), 228 AC_ERR_TIMEOUT = (1 << 2), /* timeout */
195 AC_ERR_HOST_BUS = (1 << 3), 229 AC_ERR_MEDIA = (1 << 3), /* media error */
230 AC_ERR_ATA_BUS = (1 << 4), /* ATA bus error */
231 AC_ERR_HOST_BUS = (1 << 5), /* host bus error */
232 AC_ERR_SYSTEM = (1 << 6), /* system error */
233 AC_ERR_INVALID = (1 << 7), /* invalid argument */
234 AC_ERR_OTHER = (1 << 8), /* unknown */
196}; 235};
197 236
198/* forward declarations */ 237/* forward declarations */
@@ -202,7 +241,10 @@ struct ata_port;
202struct ata_queued_cmd; 241struct ata_queued_cmd;
203 242
204/* typedefs */ 243/* typedefs */
205typedef int (*ata_qc_cb_t) (struct ata_queued_cmd *qc); 244typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc);
245typedef void (*ata_probeinit_fn_t)(struct ata_port *);
246typedef int (*ata_reset_fn_t)(struct ata_port *, int, unsigned int *);
247typedef void (*ata_postreset_fn_t)(struct ata_port *ap, unsigned int *);
206 248
207struct ata_ioports { 249struct ata_ioports {
208 unsigned long cmd_addr; 250 unsigned long cmd_addr;
@@ -305,7 +347,7 @@ struct ata_device {
305 unsigned long flags; /* ATA_DFLAG_xxx */ 347 unsigned long flags; /* ATA_DFLAG_xxx */
306 unsigned int class; /* ATA_DEV_xxx */ 348 unsigned int class; /* ATA_DEV_xxx */
307 unsigned int devno; /* 0 or 1 */ 349 unsigned int devno; /* 0 or 1 */
308 u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */ 350 u16 *id; /* IDENTIFY xxx DEVICE data */
309 u8 pio_mode; 351 u8 pio_mode;
310 u8 dma_mode; 352 u8 dma_mode;
311 u8 xfer_mode; 353 u8 xfer_mode;
@@ -313,6 +355,8 @@ struct ata_device {
313 355
314 unsigned int multi_count; /* sectors count for 356 unsigned int multi_count; /* sectors count for
315 READ/WRITE MULTIPLE */ 357 READ/WRITE MULTIPLE */
358 unsigned int max_sectors; /* per-device max sectors */
359 unsigned int cdb_len;
316 360
317 /* for CHS addressing */ 361 /* for CHS addressing */
318 u16 cylinders; /* Number of cylinders */ 362 u16 cylinders; /* Number of cylinders */
@@ -342,7 +386,6 @@ struct ata_port {
342 unsigned int mwdma_mask; 386 unsigned int mwdma_mask;
343 unsigned int udma_mask; 387 unsigned int udma_mask;
344 unsigned int cbl; /* cable type; ATA_CBL_xxx */ 388 unsigned int cbl; /* cable type; ATA_CBL_xxx */
345 unsigned int cdb_len;
346 389
347 struct ata_device device[ATA_MAX_DEVICES]; 390 struct ata_device device[ATA_MAX_DEVICES];
348 391
@@ -353,12 +396,14 @@ struct ata_port {
353 struct ata_host_stats stats; 396 struct ata_host_stats stats;
354 struct ata_host_set *host_set; 397 struct ata_host_set *host_set;
355 398
356 struct work_struct packet_task; 399 struct work_struct port_task;
357 400
358 struct work_struct pio_task;
359 unsigned int hsm_task_state; 401 unsigned int hsm_task_state;
360 unsigned long pio_task_timeout; 402 unsigned long pio_task_timeout;
361 403
404 u32 msg_enable;
405 struct list_head eh_done_q;
406
362 void *private_data; 407 void *private_data;
363}; 408};
364 409
@@ -378,7 +423,9 @@ struct ata_port_operations {
378 u8 (*check_altstatus)(struct ata_port *ap); 423 u8 (*check_altstatus)(struct ata_port *ap);
379 void (*dev_select)(struct ata_port *ap, unsigned int device); 424 void (*dev_select)(struct ata_port *ap, unsigned int device);
380 425
381 void (*phy_reset) (struct ata_port *ap); 426 void (*phy_reset) (struct ata_port *ap); /* obsolete */
427 int (*probe_reset) (struct ata_port *ap, unsigned int *classes);
428
382 void (*post_set_mode) (struct ata_port *ap); 429 void (*post_set_mode) (struct ata_port *ap);
383 430
384 int (*check_atapi_dma) (struct ata_queued_cmd *qc); 431 int (*check_atapi_dma) (struct ata_queued_cmd *qc);
@@ -387,7 +434,7 @@ struct ata_port_operations {
387 void (*bmdma_start) (struct ata_queued_cmd *qc); 434 void (*bmdma_start) (struct ata_queued_cmd *qc);
388 435
389 void (*qc_prep) (struct ata_queued_cmd *qc); 436 void (*qc_prep) (struct ata_queued_cmd *qc);
390 int (*qc_issue) (struct ata_queued_cmd *qc); 437 unsigned int (*qc_issue) (struct ata_queued_cmd *qc);
391 438
392 void (*eng_timeout) (struct ata_port *ap); 439 void (*eng_timeout) (struct ata_port *ap);
393 440
@@ -435,6 +482,18 @@ extern void ata_port_probe(struct ata_port *);
435extern void __sata_phy_reset(struct ata_port *ap); 482extern void __sata_phy_reset(struct ata_port *ap);
436extern void sata_phy_reset(struct ata_port *ap); 483extern void sata_phy_reset(struct ata_port *ap);
437extern void ata_bus_reset(struct ata_port *ap); 484extern void ata_bus_reset(struct ata_port *ap);
485extern int ata_drive_probe_reset(struct ata_port *ap,
486 ata_probeinit_fn_t probeinit,
487 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
488 ata_postreset_fn_t postreset, unsigned int *classes);
489extern void ata_std_probeinit(struct ata_port *ap);
490extern int ata_std_softreset(struct ata_port *ap, int verbose,
491 unsigned int *classes);
492extern int sata_std_hardreset(struct ata_port *ap, int verbose,
493 unsigned int *class);
494extern void ata_std_postreset(struct ata_port *ap, unsigned int *classes);
495extern int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
496 int post_reset);
438extern void ata_port_disable(struct ata_port *); 497extern void ata_port_disable(struct ata_port *);
439extern void ata_std_ports(struct ata_ioports *ioaddr); 498extern void ata_std_ports(struct ata_ioports *ioaddr);
440#ifdef CONFIG_PCI 499#ifdef CONFIG_PCI
@@ -449,7 +508,10 @@ extern void ata_host_set_remove(struct ata_host_set *host_set);
449extern int ata_scsi_detect(struct scsi_host_template *sht); 508extern int ata_scsi_detect(struct scsi_host_template *sht);
450extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg); 509extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
451extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)); 510extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
511extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
452extern int ata_scsi_error(struct Scsi_Host *host); 512extern int ata_scsi_error(struct Scsi_Host *host);
513extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
514extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
453extern int ata_scsi_release(struct Scsi_Host *host); 515extern int ata_scsi_release(struct Scsi_Host *host);
454extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc); 516extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
455extern int ata_scsi_device_resume(struct scsi_device *); 517extern int ata_scsi_device_resume(struct scsi_device *);
@@ -457,6 +519,11 @@ extern int ata_scsi_device_suspend(struct scsi_device *);
457extern int ata_device_resume(struct ata_port *, struct ata_device *); 519extern int ata_device_resume(struct ata_port *, struct ata_device *);
458extern int ata_device_suspend(struct ata_port *, struct ata_device *); 520extern int ata_device_suspend(struct ata_port *, struct ata_device *);
459extern int ata_ratelimit(void); 521extern int ata_ratelimit(void);
522extern unsigned int ata_busy_sleep(struct ata_port *ap,
523 unsigned long timeout_pat,
524 unsigned long timeout);
525extern void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *),
526 void *data, unsigned long delay);
460 527
461/* 528/*
462 * Default driver ops implementations 529 * Default driver ops implementations
@@ -470,26 +537,28 @@ extern void ata_std_dev_select (struct ata_port *ap, unsigned int device);
470extern u8 ata_check_status(struct ata_port *ap); 537extern u8 ata_check_status(struct ata_port *ap);
471extern u8 ata_altstatus(struct ata_port *ap); 538extern u8 ata_altstatus(struct ata_port *ap);
472extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf); 539extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf);
540extern int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes);
473extern int ata_port_start (struct ata_port *ap); 541extern int ata_port_start (struct ata_port *ap);
474extern void ata_port_stop (struct ata_port *ap); 542extern void ata_port_stop (struct ata_port *ap);
475extern void ata_host_stop (struct ata_host_set *host_set); 543extern void ata_host_stop (struct ata_host_set *host_set);
476extern irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs); 544extern irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
477extern void ata_qc_prep(struct ata_queued_cmd *qc); 545extern void ata_qc_prep(struct ata_queued_cmd *qc);
478extern int ata_qc_issue_prot(struct ata_queued_cmd *qc); 546extern unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc);
479extern void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, 547extern void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf,
480 unsigned int buflen); 548 unsigned int buflen);
481extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, 549extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
482 unsigned int n_elem); 550 unsigned int n_elem);
483extern unsigned int ata_dev_classify(const struct ata_taskfile *tf); 551extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
484extern void ata_dev_id_string(const u16 *id, unsigned char *s, 552extern void ata_id_string(const u16 *id, unsigned char *s,
485 unsigned int ofs, unsigned int len); 553 unsigned int ofs, unsigned int len);
486extern void ata_dev_config(struct ata_port *ap, unsigned int i); 554extern void ata_id_c_string(const u16 *id, unsigned char *s,
555 unsigned int ofs, unsigned int len);
487extern void ata_bmdma_setup (struct ata_queued_cmd *qc); 556extern void ata_bmdma_setup (struct ata_queued_cmd *qc);
488extern void ata_bmdma_start (struct ata_queued_cmd *qc); 557extern void ata_bmdma_start (struct ata_queued_cmd *qc);
489extern void ata_bmdma_stop(struct ata_queued_cmd *qc); 558extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
490extern u8 ata_bmdma_status(struct ata_port *ap); 559extern u8 ata_bmdma_status(struct ata_port *ap);
491extern void ata_bmdma_irq_clear(struct ata_port *ap); 560extern void ata_bmdma_irq_clear(struct ata_port *ap);
492extern void ata_qc_complete(struct ata_queued_cmd *qc); 561extern void __ata_qc_complete(struct ata_queued_cmd *qc);
493extern void ata_eng_timeout(struct ata_port *ap); 562extern void ata_eng_timeout(struct ata_port *ap);
494extern void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev, 563extern void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev,
495 struct scsi_cmnd *cmd, 564 struct scsi_cmnd *cmd,
@@ -586,10 +655,14 @@ static inline unsigned int ata_tag_valid(unsigned int tag)
586 return (tag < ATA_MAX_QUEUE) ? 1 : 0; 655 return (tag < ATA_MAX_QUEUE) ? 1 : 0;
587} 656}
588 657
658static inline unsigned int ata_class_present(unsigned int class)
659{
660 return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI;
661}
662
589static inline unsigned int ata_dev_present(const struct ata_device *dev) 663static inline unsigned int ata_dev_present(const struct ata_device *dev)
590{ 664{
591 return ((dev->class == ATA_DEV_ATA) || 665 return ata_class_present(dev->class);
592 (dev->class == ATA_DEV_ATAPI));
593} 666}
594 667
595static inline u8 ata_chk_status(struct ata_port *ap) 668static inline u8 ata_chk_status(struct ata_port *ap)
@@ -657,9 +730,9 @@ static inline u8 ata_wait_idle(struct ata_port *ap)
657 730
658 if (status & (ATA_BUSY | ATA_DRQ)) { 731 if (status & (ATA_BUSY | ATA_DRQ)) {
659 unsigned long l = ap->ioaddr.status_addr; 732 unsigned long l = ap->ioaddr.status_addr;
660 printk(KERN_WARNING 733 if (ata_msg_warn(ap))
661 "ATA: abnormal status 0x%X on port 0x%lX\n", 734 printk(KERN_WARNING "ATA: abnormal status 0x%X on port 0x%lX\n",
662 status, l); 735 status, l);
663 } 736 }
664 737
665 return status; 738 return status;
@@ -701,6 +774,24 @@ static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
701 ata_tf_init(qc->ap, &qc->tf, qc->dev->devno); 774 ata_tf_init(qc->ap, &qc->tf, qc->dev->devno);
702} 775}
703 776
777/**
778 * ata_qc_complete - Complete an active ATA command
779 * @qc: Command to complete
780 *
781 *
782 * Indicate to the mid and upper layers that an ATA
783 * command has completed, with either an ok or not-ok status.
784 *
785 * LOCKING:
786 * spin_lock_irqsave(host_set lock)
787 */
788static inline void ata_qc_complete(struct ata_queued_cmd *qc)
789{
790 if (unlikely(qc->flags & ATA_QCFLAG_EH_SCHEDULED))
791 return;
792
793 __ata_qc_complete(qc);
794}
704 795
705/** 796/**
706 * ata_irq_on - Enable interrupts on a port. 797 * ata_irq_on - Enable interrupts on a port.
@@ -751,7 +842,8 @@ static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
751 842
752 status = ata_busy_wait(ap, bits, 1000); 843 status = ata_busy_wait(ap, bits, 1000);
753 if (status & bits) 844 if (status & bits)
754 DPRINTK("abnormal status 0x%X\n", status); 845 if (ata_msg_err(ap))
846 printk(KERN_ERR "abnormal status 0x%X\n", status);
755 847
756 /* get controller status; clear intr, err bits */ 848 /* get controller status; clear intr, err bits */
757 if (ap->flags & ATA_FLAG_MMIO) { 849 if (ap->flags & ATA_FLAG_MMIO) {
@@ -769,8 +861,10 @@ static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
769 post_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); 861 post_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
770 } 862 }
771 863
772 VPRINTK("irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n", 864 if (ata_msg_intr(ap))
773 host_stat, post_stat, status); 865 printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n",
866 __FUNCTION__,
867 host_stat, post_stat, status);
774 868
775 return status; 869 return status;
776} 870}
@@ -807,7 +901,7 @@ static inline int ata_try_flush_cache(const struct ata_device *dev)
807static inline unsigned int ac_err_mask(u8 status) 901static inline unsigned int ac_err_mask(u8 status)
808{ 902{
809 if (status & ATA_BUSY) 903 if (status & ATA_BUSY)
810 return AC_ERR_ATA_BUS; 904 return AC_ERR_HSM;
811 if (status & (ATA_ERR | ATA_DF)) 905 if (status & (ATA_ERR | ATA_DF))
812 return AC_ERR_DEV; 906 return AC_ERR_DEV;
813 return 0; 907 return 0;
diff --git a/include/linux/mv643xx.h b/include/linux/mv643xx.h
index 0b08cd692201..955d3069d727 100644
--- a/include/linux/mv643xx.h
+++ b/include/linux/mv643xx.h
@@ -1214,6 +1214,7 @@ struct mv64xxx_i2c_pdata {
1214#define MV643XX_ETH_FORCE_BP_MODE_NO_JAM 0 1214#define MV643XX_ETH_FORCE_BP_MODE_NO_JAM 0
1215#define MV643XX_ETH_FORCE_BP_MODE_JAM_TX (1<<7) 1215#define MV643XX_ETH_FORCE_BP_MODE_JAM_TX (1<<7)
1216#define MV643XX_ETH_FORCE_BP_MODE_JAM_TX_ON_RX_ERR (1<<8) 1216#define MV643XX_ETH_FORCE_BP_MODE_JAM_TX_ON_RX_ERR (1<<8)
1217#define MV643XX_ETH_SERIAL_PORT_CONTROL_RESERVED (1<<9)
1217#define MV643XX_ETH_FORCE_LINK_FAIL 0 1218#define MV643XX_ETH_FORCE_LINK_FAIL 0
1218#define MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL (1<<10) 1219#define MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL (1<<10)
1219#define MV643XX_ETH_RETRANSMIT_16_ATTEMPTS 0 1220#define MV643XX_ETH_RETRANSMIT_16_ATTEMPTS 0
@@ -1243,6 +1244,8 @@ struct mv64xxx_i2c_pdata {
1243#define MV643XX_ETH_SET_MII_SPEED_TO_10 0 1244#define MV643XX_ETH_SET_MII_SPEED_TO_10 0
1244#define MV643XX_ETH_SET_MII_SPEED_TO_100 (1<<24) 1245#define MV643XX_ETH_SET_MII_SPEED_TO_100 (1<<24)
1245 1246
1247#define MV643XX_ETH_MAX_RX_PACKET_MASK (0x7<<17)
1248
1246#define MV643XX_ETH_PORT_SERIAL_CONTROL_DEFAULT_VALUE \ 1249#define MV643XX_ETH_PORT_SERIAL_CONTROL_DEFAULT_VALUE \
1247 MV643XX_ETH_DO_NOT_FORCE_LINK_PASS | \ 1250 MV643XX_ETH_DO_NOT_FORCE_LINK_PASS | \
1248 MV643XX_ETH_ENABLE_AUTO_NEG_FOR_DUPLX | \ 1251 MV643XX_ETH_ENABLE_AUTO_NEG_FOR_DUPLX | \
@@ -1285,23 +1288,15 @@ struct mv64xxx_i2c_pdata {
1285#define MV643XX_ETH_NAME "mv643xx_eth" 1288#define MV643XX_ETH_NAME "mv643xx_eth"
1286 1289
1287struct mv643xx_eth_platform_data { 1290struct mv643xx_eth_platform_data {
1288 /*
1289 * Non-values for mac_addr, phy_addr, port_config, etc.
1290 * override the default value. Setting the corresponding
1291 * force_* field, causes the default value to be overridden
1292 * even when zero.
1293 */
1294 unsigned int force_phy_addr:1;
1295 unsigned int force_port_config:1;
1296 unsigned int force_port_config_extend:1;
1297 unsigned int force_port_sdma_config:1;
1298 unsigned int force_port_serial_control:1;
1299 int phy_addr;
1300 char *mac_addr; /* pointer to mac address */ 1291 char *mac_addr; /* pointer to mac address */
1301 u32 port_config; 1292 u16 force_phy_addr; /* force override if phy_addr == 0 */
1302 u32 port_config_extend; 1293 u16 phy_addr;
1303 u32 port_sdma_config; 1294
1304 u32 port_serial_control; 1295 /* If speed is 0, then speed and duplex are autonegotiated. */
1296 int speed; /* 0, SPEED_10, SPEED_100, SPEED_1000 */
1297 int duplex; /* DUPLEX_HALF or DUPLEX_FULL */
1298
1299 /* non-zero values of the following fields override defaults */
1305 u32 tx_queue_size; 1300 u32 tx_queue_size;
1306 u32 rx_queue_size; 1301 u32 rx_queue_size;
1307 u32 tx_sram_addr; 1302 u32 tx_sram_addr;
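
A hedged board-support sketch for the slimmed-down platform data; the values are examples only and the MAC address and device registration details are omitted:

static struct mv643xx_eth_platform_data example_eth_pd = {
	.force_phy_addr	= 1,		/* honour phy_addr even though it is zero */
	.phy_addr	= 0,
	.speed		= 0,		/* 0: autonegotiate speed and duplex */
	.duplex		= DUPLEX_FULL,	/* only consulted when speed is forced */
	.tx_queue_size	= 400,		/* non-zero values override driver defaults */
	.rx_queue_size	= 400,
};
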
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 4041122dabfc..57abcea1cb5d 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -127,6 +127,9 @@
127/* Hilscher netx */ 127/* Hilscher netx */
128#define PORT_NETX 71 128#define PORT_NETX 71
129 129
130/* SUN4V Hypervisor Console */
131#define PORT_SUNHV 72
132
130#ifdef __KERNEL__ 133#ifdef __KERNEL__
131 134
132#include <linux/config.h> 135#include <linux/config.h>
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 5208b12d5550..724cfbf54b8a 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -17,11 +17,12 @@
17#include <linux/time.h> /* need struct timeval */ 17#include <linux/time.h> /* need struct timeval */
18#include <linux/poll.h> 18#include <linux/poll.h>
19#include <linux/device.h> 19#include <linux/device.h>
20#include <linux/mutex.h>
20#endif 21#endif
21#include <linux/compiler.h> /* need __user */ 22#include <linux/compiler.h> /* need __user */
22 23
23 24
24#define OBSOLETE_OWNER 1 /* It will be removed for 2.6.15 */ 25#define OBSOLETE_OWNER 1 /* It will be removed for 2.6.17 */
25#define HAVE_V4L2 1 26#define HAVE_V4L2 1
26 27
27/* 28/*
@@ -48,6 +49,16 @@
48 49
49#ifdef __KERNEL__ 50#ifdef __KERNEL__
50 51
52/* Minor device allocation */
53#define MINOR_VFL_TYPE_GRABBER_MIN 0
54#define MINOR_VFL_TYPE_GRABBER_MAX 63
55#define MINOR_VFL_TYPE_RADIO_MIN 64
56#define MINOR_VFL_TYPE_RADIO_MAX 127
57#define MINOR_VFL_TYPE_VTX_MIN 192
58#define MINOR_VFL_TYPE_VTX_MAX 223
59#define MINOR_VFL_TYPE_VBI_MIN 224
60#define MINOR_VFL_TYPE_VBI_MAX 255
61
51#define VFL_TYPE_GRABBER 0 62#define VFL_TYPE_GRABBER 0
52#define VFL_TYPE_VBI 1 63#define VFL_TYPE_VBI 1
53#define VFL_TYPE_RADIO 2 64#define VFL_TYPE_RADIO 2
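
A hedged helper relating the new minor-range constants to the VFL_TYPE_* values used when registering a video_device; the function name is illustrative:

static int example_first_minor(int vfl_type)
{
	switch (vfl_type) {
	case VFL_TYPE_GRABBER:	return MINOR_VFL_TYPE_GRABBER_MIN;	/* /dev/video* */
	case VFL_TYPE_RADIO:	return MINOR_VFL_TYPE_RADIO_MIN;	/* /dev/radio* */
	case VFL_TYPE_VBI:	return MINOR_VFL_TYPE_VBI_MIN;		/* /dev/vbi* */
	default:		return -1;
	}
}
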
@@ -80,7 +91,7 @@ struct video_device
80 91
81 /* for videodev.c internal usage -- please don't touch */ 92 /* for videodev.c internal usage -- please don't touch */
82 int users; /* video_exclusive_{open|close} ... */ 93 int users; /* video_exclusive_{open|close} ... */
83 struct semaphore lock; /* ... helper function uses these */ 94 struct mutex lock; /* ... helper function uses these */
84 char devfs_name[64]; /* devfs */ 95 char devfs_name[64]; /* devfs */
85 struct class_device class_dev; /* sysfs */ 96 struct class_device class_dev; /* sysfs */
86}; 97};
@@ -952,13 +963,68 @@ struct v4l2_sliced_vbi_format
952 __u32 reserved[2]; /* must be zero */ 963 __u32 reserved[2]; /* must be zero */
953}; 964};
954 965
955#define V4L2_SLICED_TELETEXT_B (0x0001) 966/* Teletext World System Teletext
956#define V4L2_SLICED_VPS (0x0400) 967 (WST), defined on ITU-R BT.653-2 */
957#define V4L2_SLICED_CAPTION_525 (0x1000) 968#define V4L2_SLICED_TELETEXT_PAL_B (0x000001)
958#define V4L2_SLICED_WSS_625 (0x4000) 969#define V4L2_SLICED_TELETEXT_PAL_C (0x000002)
959 970#define V4L2_SLICED_TELETEXT_NTSC_B (0x000010)
960#define V4L2_SLICED_VBI_525 (V4L2_SLICED_CAPTION_525) 971#define V4L2_SLICED_TELETEXT_SECAM (0x000020)
961#define V4L2_SLICED_VBI_625 (V4L2_SLICED_TELETEXT_B | V4L2_SLICED_VPS | V4L2_SLICED_WSS_625) 972
973/* Teletext North American Broadcast Teletext Specification
974 (NABTS), defined on ITU-R BT.653-2 */
975#define V4L2_SLICED_TELETEXT_NTSC_C (0x000040)
976#define V4L2_SLICED_TELETEXT_NTSC_D (0x000080)
977
978/* Video Program System, defined on ETS 300 231*/
979#define V4L2_SLICED_VPS (0x000400)
980
981/* Closed Caption, defined on EIA-608 */
982#define V4L2_SLICED_CAPTION_525 (0x001000)
983#define V4L2_SLICED_CAPTION_625 (0x002000)
984
985/* Wide Screen System, defined on ITU-R BT1119.1 */
986#define V4L2_SLICED_WSS_625 (0x004000)
987
988/* Wide Screen System, defined on IEC 61880 */
989#define V4L2_SLICED_WSS_525 (0x008000)
990
991/* Vertical Interval Timecode (VITC), defined on SMPTE 12M */
992#define V4l2_SLICED_VITC_625 (0x010000)
993#define V4l2_SLICED_VITC_525 (0x020000)
994
995#define V4L2_SLICED_TELETEXT_B (V4L2_SLICED_TELETEXT_PAL_B |\
996 V4L2_SLICED_TELETEXT_NTSC_B)
997
998#define V4L2_SLICED_TELETEXT (V4L2_SLICED_TELETEXT_PAL_B |\
999 V4L2_SLICED_TELETEXT_PAL_C |\
1000 V4L2_SLICED_TELETEXT_SECAM |\
1001 V4L2_SLICED_TELETEXT_NTSC_B |\
1002 V4L2_SLICED_TELETEXT_NTSC_C |\
1003 V4L2_SLICED_TELETEXT_NTSC_D)
1004
1005#define V4L2_SLICED_CAPTION (V4L2_SLICED_CAPTION_525 |\
1006 V4L2_SLICED_CAPTION_625)
1007
1008#define V4L2_SLICED_WSS (V4L2_SLICED_WSS_525 |\
1009 V4L2_SLICED_WSS_625)
1010
1011#define V4L2_SLICED_VITC (V4L2_SLICED_VITC_525 |\
1012 V4L2_SLICED_VITC_625)
1013
1014#define V4L2_SLICED_VBI_525 (V4L2_SLICED_TELETEXT_NTSC_B |\
1015 V4L2_SLICED_TELETEXT_NTSC_C |\
1016 V4L2_SLICED_TELETEXT_NTSC_D |\
1017 V4L2_SLICED_CAPTION_525 |\
1018 V4L2_SLICED_WSS_525 |\
1019 V4l2_SLICED_VITC_525)
1020
1021#define V4L2_SLICED_VBI_625 (V4L2_SLICED_TELETEXT_PAL_B |\
1022 V4L2_SLICED_TELETEXT_PAL_C |\
1023 V4L2_SLICED_TELETEXT_SECAM |\
1024 V4L2_SLICED_VPS |\
1025 V4L2_SLICED_CAPTION_625 |\
1026 V4L2_SLICED_WSS_625 |\
1027 V4l2_SLICED_VITC_625)
962 1028
963struct v4l2_sliced_vbi_cap 1029struct v4l2_sliced_vbi_cap
964{ 1030{
diff --git a/include/media/ir-common.h b/include/media/ir-common.h
index ad3e9bb670c3..302d5b3946e7 100644
--- a/include/media/ir-common.h
+++ b/include/media/ir-common.h
@@ -47,13 +47,6 @@ struct ir_input_state {
47 int keypressed; /* current state */ 47 int keypressed; /* current state */
48}; 48};
49 49
50extern IR_KEYTAB_TYPE ir_codes_rc5_tv[IR_KEYTAB_SIZE];
51extern IR_KEYTAB_TYPE ir_codes_winfast[IR_KEYTAB_SIZE];
52extern IR_KEYTAB_TYPE ir_codes_pinnacle[IR_KEYTAB_SIZE];
53extern IR_KEYTAB_TYPE ir_codes_empty[IR_KEYTAB_SIZE];
54extern IR_KEYTAB_TYPE ir_codes_hauppauge_new[IR_KEYTAB_SIZE];
55extern IR_KEYTAB_TYPE ir_codes_pixelview[IR_KEYTAB_SIZE];
56
57void ir_input_init(struct input_dev *dev, struct ir_input_state *ir, 50void ir_input_init(struct input_dev *dev, struct ir_input_state *ir,
58 int ir_type, IR_KEYTAB_TYPE *ir_codes); 51 int ir_type, IR_KEYTAB_TYPE *ir_codes);
59void ir_input_nokey(struct input_dev *dev, struct ir_input_state *ir); 52void ir_input_nokey(struct input_dev *dev, struct ir_input_state *ir);
@@ -64,6 +57,39 @@ int ir_dump_samples(u32 *samples, int count);
64int ir_decode_biphase(u32 *samples, int count, int low, int high); 57int ir_decode_biphase(u32 *samples, int count, int low, int high);
65int ir_decode_pulsedistance(u32 *samples, int count, int low, int high); 58int ir_decode_pulsedistance(u32 *samples, int count, int low, int high);
66 59
60/* Keymaps to be used by other modules */
61
62extern IR_KEYTAB_TYPE ir_codes_empty[IR_KEYTAB_SIZE];
63extern IR_KEYTAB_TYPE ir_codes_avermedia[IR_KEYTAB_SIZE];
64extern IR_KEYTAB_TYPE ir_codes_avermedia_dvbt[IR_KEYTAB_SIZE];
65extern IR_KEYTAB_TYPE ir_codes_apac_viewcomp[IR_KEYTAB_SIZE];
66extern IR_KEYTAB_TYPE ir_codes_pixelview[IR_KEYTAB_SIZE];
67extern IR_KEYTAB_TYPE ir_codes_nebula[IR_KEYTAB_SIZE];
68extern IR_KEYTAB_TYPE ir_codes_dntv_live_dvb_t[IR_KEYTAB_SIZE];
69extern IR_KEYTAB_TYPE ir_codes_iodata_bctv7e[IR_KEYTAB_SIZE];
70extern IR_KEYTAB_TYPE ir_codes_adstech_dvb_t_pci[IR_KEYTAB_SIZE];
71extern IR_KEYTAB_TYPE ir_codes_msi_tvanywhere[IR_KEYTAB_SIZE];
72extern IR_KEYTAB_TYPE ir_codes_cinergy_1400[IR_KEYTAB_SIZE];
73extern IR_KEYTAB_TYPE ir_codes_avertv_303[IR_KEYTAB_SIZE];
74extern IR_KEYTAB_TYPE ir_codes_dntv_live_dvbt_pro[IR_KEYTAB_SIZE];
75extern IR_KEYTAB_TYPE ir_codes_em_terratec[IR_KEYTAB_SIZE];
76extern IR_KEYTAB_TYPE ir_codes_em_pinnacle_usb[IR_KEYTAB_SIZE];
77extern IR_KEYTAB_TYPE ir_codes_flyvideo[IR_KEYTAB_SIZE];
78extern IR_KEYTAB_TYPE ir_codes_flydvb[IR_KEYTAB_SIZE];
79extern IR_KEYTAB_TYPE ir_codes_cinergy[IR_KEYTAB_SIZE];
80extern IR_KEYTAB_TYPE ir_codes_eztv[IR_KEYTAB_SIZE];
81extern IR_KEYTAB_TYPE ir_codes_avermedia[IR_KEYTAB_SIZE];
82extern IR_KEYTAB_TYPE ir_codes_videomate_tv_pvr[IR_KEYTAB_SIZE];
83extern IR_KEYTAB_TYPE ir_codes_manli[IR_KEYTAB_SIZE];
84extern IR_KEYTAB_TYPE ir_codes_gotview7135[IR_KEYTAB_SIZE];
85extern IR_KEYTAB_TYPE ir_codes_purpletv[IR_KEYTAB_SIZE];
86extern IR_KEYTAB_TYPE ir_codes_pctv_sedna[IR_KEYTAB_SIZE];
87extern IR_KEYTAB_TYPE ir_codes_pv951[IR_KEYTAB_SIZE];
88extern IR_KEYTAB_TYPE ir_codes_rc5_tv[IR_KEYTAB_SIZE];
89extern IR_KEYTAB_TYPE ir_codes_winfast[IR_KEYTAB_SIZE];
90extern IR_KEYTAB_TYPE ir_codes_pinnacle[IR_KEYTAB_SIZE];
91extern IR_KEYTAB_TYPE ir_codes_hauppauge_new[IR_KEYTAB_SIZE];
92
67#endif 93#endif
68 94
69/* 95/*
diff --git a/include/media/saa7146.h b/include/media/saa7146.h
index 2bc634fcb7bb..fee579f10b32 100644
--- a/include/media/saa7146.h
+++ b/include/media/saa7146.h
@@ -11,6 +11,8 @@
11#include <linux/i2c.h> /* for i2c subsystem */ 11#include <linux/i2c.h> /* for i2c subsystem */
12#include <asm/io.h> /* for accessing devices */ 12#include <asm/io.h> /* for accessing devices */
13#include <linux/stringify.h> 13#include <linux/stringify.h>
14#include <linux/mutex.h>
15
14#include <linux/vmalloc.h> /* for vmalloc() */ 16#include <linux/vmalloc.h> /* for vmalloc() */
15#include <linux/mm.h> /* for vmalloc_to_page() */ 17#include <linux/mm.h> /* for vmalloc_to_page() */
16 18
@@ -112,7 +114,7 @@ struct saa7146_dev
112 114
113 /* different device locks */ 115 /* different device locks */
114 spinlock_t slock; 116 spinlock_t slock;
115 struct semaphore lock; 117 struct mutex lock;
116 118
117 unsigned char __iomem *mem; /* pointer to mapped IO memory */ 119 unsigned char __iomem *mem; /* pointer to mapped IO memory */
118 int revision; /* chip revision; needed for bug-workarounds*/ 120 int revision; /* chip revision; needed for bug-workarounds*/
@@ -133,15 +135,16 @@ struct saa7146_dev
133 void (*vv_callback)(struct saa7146_dev *dev, unsigned long status); 135 void (*vv_callback)(struct saa7146_dev *dev, unsigned long status);
134 136
135 /* i2c-stuff */ 137 /* i2c-stuff */
136 struct semaphore i2c_lock; 138 struct mutex i2c_lock;
137 u32 i2c_bitrate; 139
138 struct saa7146_dma d_i2c; /* pointer to i2c memory */ 140 u32 i2c_bitrate;
139 wait_queue_head_t i2c_wq; 141 struct saa7146_dma d_i2c; /* pointer to i2c memory */
140 int i2c_op; 142 wait_queue_head_t i2c_wq;
143 int i2c_op;
141 144
142 /* memories */ 145 /* memories */
143 struct saa7146_dma d_rps0; 146 struct saa7146_dma d_rps0;
144 struct saa7146_dma d_rps1; 147 struct saa7146_dma d_rps1;
145}; 148};
146 149
147/* from saa7146_i2c.c */ 150/* from saa7146_i2c.c */
@@ -150,7 +153,7 @@ int saa7146_i2c_transfer(struct saa7146_dev *saa, const struct i2c_msg *msgs, in
150 153
151/* from saa7146_core.c */ 154/* from saa7146_core.c */
152extern struct list_head saa7146_devices; 155extern struct list_head saa7146_devices;
153extern struct semaphore saa7146_devices_lock; 156extern struct mutex saa7146_devices_lock;
154int saa7146_register_extension(struct saa7146_extension*); 157int saa7146_register_extension(struct saa7146_extension*);
155int saa7146_unregister_extension(struct saa7146_extension*); 158int saa7146_unregister_extension(struct saa7146_extension*);
156struct saa7146_format* format_by_fourcc(struct saa7146_dev *dev, int fourcc); 159struct saa7146_format* format_by_fourcc(struct saa7146_dev *dev, int fourcc);
diff --git a/include/media/tuner-types.h b/include/media/tuner-types.h
index 15821ab14a9e..ad9c171bfa07 100644
--- a/include/media/tuner-types.h
+++ b/include/media/tuner-types.h
@@ -14,6 +14,7 @@ enum param_type {
14 14
15struct tuner_range { 15struct tuner_range {
16 unsigned short limit; 16 unsigned short limit;
17 unsigned char config;
17 unsigned char cb; 18 unsigned char cb;
18}; 19};
19 20
@@ -38,7 +39,6 @@ struct tuner_params {
38 * static unless the control byte was sent first. 39 * static unless the control byte was sent first.
39 */ 40 */
40 unsigned int cb_first_if_lower_freq:1; 41 unsigned int cb_first_if_lower_freq:1;
41 unsigned char config; /* to be moved into struct tuner_range for dvb-pll merge */
42 42
43 unsigned int count; 43 unsigned int count;
44 struct tuner_range *ranges; 44 struct tuner_range *ranges;
@@ -46,6 +46,7 @@ struct tuner_params {
46 46
47struct tunertype { 47struct tunertype {
48 char *name; 48 char *name;
49 unsigned int count;
49 struct tuner_params *params; 50 struct tuner_params *params;
50}; 51};
51 52
diff --git a/include/media/tuner.h b/include/media/tuner.h
index a5beeac495c7..017fed7d5e4d 100644
--- a/include/media/tuner.h
+++ b/include/media/tuner.h
@@ -110,12 +110,15 @@
110 110
111#define TUNER_LG_TDVS_H062F 64 /* DViCO FusionHDTV 5 */ 111#define TUNER_LG_TDVS_H062F 64 /* DViCO FusionHDTV 5 */
112#define TUNER_YMEC_TVF66T5_B_DFF 65 /* Acorp Y878F */ 112#define TUNER_YMEC_TVF66T5_B_DFF 65 /* Acorp Y878F */
113#define TUNER_LG_NTSC_TALN_MINI 66 113#define TUNER_LG_TALN 66
114#define TUNER_PHILIPS_TD1316 67 114#define TUNER_PHILIPS_TD1316 67
115 115
116#define TUNER_PHILIPS_TUV1236D 68 /* ATI HDTV Wonder */ 116#define TUNER_PHILIPS_TUV1236D 68 /* ATI HDTV Wonder */
117#define TUNER_TNF_5335MF 69 /* Sabrent Bt848 */ 117#define TUNER_TNF_5335MF 69 /* Sabrent Bt848 */
118#define TUNER_SAMSUNG_TCPN_2121P30A 70 /* Hauppauge PVR-500MCE NTSC */ 118#define TUNER_SAMSUNG_TCPN_2121P30A 70 /* Hauppauge PVR-500MCE NTSC */
119#define TUNER_XCEIVE_XC3028 71
120
121#define TUNER_THOMSON_FE6600 72 /* DViCO FusionHDTV DVB-T Hybrid */
119 122
120/* tv card specific */ 123/* tv card specific */
121#define TDA9887_PRESENT (1<<0) 124#define TDA9887_PRESENT (1<<0)
@@ -209,6 +212,7 @@ struct tuner {
209extern unsigned const int tuner_count; 212extern unsigned const int tuner_count;
210 213
211extern int microtune_init(struct i2c_client *c); 214extern int microtune_init(struct i2c_client *c);
215extern int xc3028_init(struct i2c_client *c);
212extern int tda8290_init(struct i2c_client *c); 216extern int tda8290_init(struct i2c_client *c);
213extern int tda8290_probe(struct i2c_client *c); 217extern int tda8290_probe(struct i2c_client *c);
214extern int tea5767_tuner_init(struct i2c_client *c); 218extern int tea5767_tuner_init(struct i2c_client *c);
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index d4030a7e16e0..2360453e7496 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -58,6 +58,9 @@
58/* Prints the ioctl in a human-readable format */ 58/* Prints the ioctl in a human-readable format */
59extern void v4l_printk_ioctl(unsigned int cmd); 59extern void v4l_printk_ioctl(unsigned int cmd);
60 60
61/* Prints the ioctl and arg in a human-readable format */
62extern void v4l_printk_ioctl_arg(char *s,unsigned int cmd, void *arg);
63
61/* Use this macro for non-I2C drivers. Pass the driver name as the first arg. */ 64/* Use this macro for non-I2C drivers. Pass the driver name as the first arg. */
62#define v4l_print_ioctl(name, cmd) \ 65#define v4l_print_ioctl(name, cmd) \
63 do { \ 66 do { \
@@ -100,6 +103,7 @@ enum v4l2_chip_ident {
100 V4L2_IDENT_UNKNOWN = 0, 103 V4L2_IDENT_UNKNOWN = 0,
101 104
102 /* module saa7115: reserved range 100-149 */ 105 /* module saa7115: reserved range 100-149 */
106 V4L2_IDENT_SAA7113 = 103,
103 V4L2_IDENT_SAA7114 = 104, 107 V4L2_IDENT_SAA7114 = 104,
104 V4L2_IDENT_SAA7115 = 105, 108 V4L2_IDENT_SAA7115 = 105,
105 109
@@ -115,12 +119,15 @@ enum v4l2_chip_ident {
115}; 119};
116 120
117/* audio ioctls */ 121/* audio ioctls */
118/* v4l device was opened in Radio mode */ 122
123/* v4l device was opened in Radio mode, to be replaced by VIDIOC_INT_S_TUNER_MODE */
119#define AUDC_SET_RADIO _IO('d',88) 124#define AUDC_SET_RADIO _IO('d',88)
120/* select from TV,radio,extern,MUTE */ 125
126/* select from TV,radio,extern,MUTE, to be replaced with VIDIOC_INT_S_AUDIO_ROUTING */
121#define AUDC_SET_INPUT _IOW('d',89,int) 127#define AUDC_SET_INPUT _IOW('d',89,int)
122 128
123/* msp3400 ioctl: will be removed in the near future */ 129/* msp3400 ioctl: will be removed in the near future, to be replaced by
130 VIDIOC_INT_S_AUDIO_ROUTING. */
124struct msp_matrix { 131struct msp_matrix {
125 int input; 132 int input;
126 int output; 133 int output;
@@ -128,12 +135,25 @@ struct msp_matrix {
128#define MSP_SET_MATRIX _IOW('m',17,struct msp_matrix) 135#define MSP_SET_MATRIX _IOW('m',17,struct msp_matrix)
129 136
130/* tuner ioctls */ 137/* tuner ioctls */
138
131/* Sets tuner type and its I2C addr */ 139/* Sets tuner type and its I2C addr */
132#define TUNER_SET_TYPE_ADDR _IOW('d',90,int) 140#define TUNER_SET_TYPE_ADDR _IOW('d', 90, int)
133/* Puts tuner on powersaving state, disabling it, except for i2c */ 141
134#define TUNER_SET_STANDBY _IOW('d',91,int) 142/* Puts tuner on powersaving state, disabling it, except for i2c. To be replaced
143 by VIDIOC_INT_S_STANDBY. */
144#define TUNER_SET_STANDBY _IOW('d', 91, int)
145
135/* Sets tda9887 specific stuff, like port1, port2 and qss */ 146/* Sets tda9887 specific stuff, like port1, port2 and qss */
136#define TDA9887_SET_CONFIG _IOW('d',92,int) 147#define TDA9887_SET_CONFIG _IOW('d', 92, int)
148
149/* Switch the tuner to a specific tuner mode. Replacement of AUDC_SET_RADIO */
150#define VIDIOC_INT_S_TUNER_MODE _IOW('d', 93, enum v4l2_tuner_type)
151
152/* Generic standby command. Passing -1 (all bits set to 1) will put the whole
153 chip into standby mode, value 0 will make the chip fully active. Specific
154 bits can be used by certain chips to enable/disable specific subsystems.
155 Replacement of TUNER_SET_STANDBY. */
156#define VIDIOC_INT_S_STANDBY _IOW('d', 94, u32)
137 157
138/* only implemented if CONFIG_VIDEO_ADV_DEBUG is defined */ 158/* only implemented if CONFIG_VIDEO_ADV_DEBUG is defined */
139#define VIDIOC_INT_S_REGISTER _IOR ('d', 100, struct v4l2_register) 159#define VIDIOC_INT_S_REGISTER _IOR ('d', 100, struct v4l2_register)
@@ -160,7 +180,8 @@ struct msp_matrix {
160 180
161/* Used to generate VBI signals on a video signal. v4l2_sliced_vbi_data is 181/* Used to generate VBI signals on a video signal. v4l2_sliced_vbi_data is
162 filled with the data packets that should be output. Note that if you set 182 filled with the data packets that should be output. Note that if you set
163 the line field to 0, then that VBI signal is disabled. */ 183 the line field to 0, then that VBI signal is disabled. If no
184 valid VBI data was found, then the type field is set to 0 on return. */
164#define VIDIOC_INT_S_VBI_DATA _IOW ('d', 105, struct v4l2_sliced_vbi_data) 185#define VIDIOC_INT_S_VBI_DATA _IOW ('d', 105, struct v4l2_sliced_vbi_data)
165 186
166/* Used to obtain the sliced VBI packet from a readback register. Not all 187/* Used to obtain the sliced VBI packet from a readback register. Not all
@@ -168,11 +189,11 @@ struct msp_matrix {
168 register contains invalid or erroneous data -EIO is returned. Note that 189 register contains invalid or erroneous data -EIO is returned. Note that
169 you must fill in the 'id' member and the 'field' member (to determine 190 you must fill in the 'id' member and the 'field' member (to determine
170 whether CC data from the first or second field should be obtained). */ 191 whether CC data from the first or second field should be obtained). */
171#define VIDIOC_INT_G_VBI_DATA _IOWR('d', 106, struct v4l2_sliced_vbi_data *) 192#define VIDIOC_INT_G_VBI_DATA _IOWR('d', 106, struct v4l2_sliced_vbi_data)
172 193
173/* Returns the chip identifier or V4L2_IDENT_UNKNOWN if no identification can 194/* Returns the chip identifier or V4L2_IDENT_UNKNOWN if no identification can
174 be made. */ 195 be made. */
175#define VIDIOC_INT_G_CHIP_IDENT _IOR ('d', 107, enum v4l2_chip_ident *) 196#define VIDIOC_INT_G_CHIP_IDENT _IOR ('d', 107, enum v4l2_chip_ident)
176 197
177/* Sets I2S speed in bps. This is used to provide a standard way to select I2S 198/* Sets I2S speed in bps. This is used to provide a standard way to select I2S
178 clock used by driving digital audio streams at some board designs. 199 clock used by driving digital audio streams at some board designs.
@@ -180,4 +201,25 @@ struct msp_matrix {
180 If the frequency is not supported, then -EINVAL is returned. */ 201 If the frequency is not supported, then -EINVAL is returned. */
181#define VIDIOC_INT_I2S_CLOCK_FREQ _IOW ('d', 108, u32) 202#define VIDIOC_INT_I2S_CLOCK_FREQ _IOW ('d', 108, u32)
182 203
204/* Routing definition, device dependent. It specifies which inputs (if any)
205 should be routed to which outputs (if any). */
206struct v4l2_routing {
207 u32 input;
208 u32 output;
209};
210
211/* These internal commands should be used to define the inputs and outputs
212 of an audio/video chip. They will replace AUDC_SET_INPUT.
213 The v4l2 API commands VIDIOC_S/G_INPUT, VIDIOC_S/G_OUTPUT,
214 VIDIOC_S/G_AUDIO and VIDIOC_S/G_AUDOUT are meant to be used by the
215 user. Internally these commands should be used to switch inputs/outputs
216 because only the driver knows how to map a 'Television' input to the precise
217 input/output routing of an A/D converter, or a DSP, or a video digitizer.
218 These four commands should only be sent directly to an i2c device, they
219 should not be broadcast as the routing is very device specific. */
220#define VIDIOC_INT_S_AUDIO_ROUTING _IOW ('d', 109, struct v4l2_routing)
221#define VIDIOC_INT_G_AUDIO_ROUTING _IOR ('d', 110, struct v4l2_routing)
222#define VIDIOC_INT_S_VIDEO_ROUTING _IOW ('d', 111, struct v4l2_routing)
223#define VIDIOC_INT_G_VIDEO_ROUTING _IOR ('d', 112, struct v4l2_routing)
224
183#endif /* V4L2_COMMON_H_ */ 225#endif /* V4L2_COMMON_H_ */
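As an illustration of how the new internal commands are meant to be used, a sketch of an i2c decoder driver's command handler follows; the register helpers and register name are hypothetical, everything else is defined above:

	static int example_decoder_command(struct i2c_client *client,
					   unsigned int cmd, void *arg)
	{
		struct v4l2_routing *route = arg;

		switch (cmd) {
		case VIDIOC_INT_S_VIDEO_ROUTING:
			/* Only the driver knows how a logical input maps onto
			 * its analog muxes, so the translation happens here. */
			return example_write_reg(client, EXAMPLE_REG_INPUT_MUX,
						 route->input);
		case VIDIOC_INT_G_VIDEO_ROUTING:
			route->input  = example_read_reg(client,
							 EXAMPLE_REG_INPUT_MUX);
			route->output = 0;
			return 0;
		}
		return -EINVAL;
	}

Note that the command is sent to one specific i2c client rather than broadcast, exactly because this mapping is device specific.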
diff --git a/include/media/video-buf-dvb.h b/include/media/video-buf-dvb.h
index ad0a07a3a895..b78d90fe629f 100644
--- a/include/media/video-buf-dvb.h
+++ b/include/media/video-buf-dvb.h
@@ -11,7 +11,7 @@ struct videobuf_dvb {
11 struct videobuf_queue dvbq; 11 struct videobuf_queue dvbq;
12 12
13 /* video-buf-dvb state info */ 13 /* video-buf-dvb state info */
14 struct semaphore lock; 14 struct mutex lock;
15 struct task_struct *thread; 15 struct task_struct *thread;
16 int nfeeds; 16 int nfeeds;
17 17
diff --git a/include/media/video-buf.h b/include/media/video-buf.h
index 8ecfd78e0027..d90dec5484ee 100644
--- a/include/media/video-buf.h
+++ b/include/media/video-buf.h
@@ -177,7 +177,7 @@ struct videobuf_queue_ops {
177}; 177};
178 178
179struct videobuf_queue { 179struct videobuf_queue {
180 struct semaphore lock; 180 struct mutex lock;
181 spinlock_t *irqlock; 181 spinlock_t *irqlock;
182 struct pci_dev *pci; 182 struct pci_dev *pci;
183 183
diff --git a/include/net/ieee80211.h b/include/net/ieee80211.h
index 9a92aef8b0b2..4725ff861c57 100644
--- a/include/net/ieee80211.h
+++ b/include/net/ieee80211.h
@@ -220,6 +220,7 @@ struct ieee80211_snap_hdr {
220/* Authentication algorithms */ 220/* Authentication algorithms */
221#define WLAN_AUTH_OPEN 0 221#define WLAN_AUTH_OPEN 0
222#define WLAN_AUTH_SHARED_KEY 1 222#define WLAN_AUTH_SHARED_KEY 1
223#define WLAN_AUTH_LEAP 2
223 224
224#define WLAN_AUTH_CHALLENGE_LEN 128 225#define WLAN_AUTH_CHALLENGE_LEN 128
225 226
@@ -299,6 +300,23 @@ enum ieee80211_reasoncode {
299 WLAN_REASON_CIPHER_SUITE_REJECTED = 24, 300 WLAN_REASON_CIPHER_SUITE_REJECTED = 24,
300}; 301};
301 302
303/* Action categories - 802.11h */
304enum ieee80211_actioncategories {
305 WLAN_ACTION_SPECTRUM_MGMT = 0,
306 /* Reserved 1-127 */
307 /* Error 128-255 */
308};
309
310/* Action details - 802.11h */
311enum ieee80211_actiondetails {
312 WLAN_ACTION_CATEGORY_MEASURE_REQUEST = 0,
313 WLAN_ACTION_CATEGORY_MEASURE_REPORT = 1,
314 WLAN_ACTION_CATEGORY_TPC_REQUEST = 2,
315 WLAN_ACTION_CATEGORY_TPC_REPORT = 3,
316 WLAN_ACTION_CATEGORY_CHANNEL_SWITCH = 4,
317 /* 5 - 255 Reserved */
318};
319
302#define IEEE80211_STATMASK_SIGNAL (1<<0) 320#define IEEE80211_STATMASK_SIGNAL (1<<0)
303#define IEEE80211_STATMASK_RSSI (1<<1) 321#define IEEE80211_STATMASK_RSSI (1<<1)
304#define IEEE80211_STATMASK_NOISE (1<<2) 322#define IEEE80211_STATMASK_NOISE (1<<2)
@@ -377,6 +395,8 @@ struct ieee80211_rx_stats {
377 u8 mask; 395 u8 mask;
378 u8 freq; 396 u8 freq;
379 u16 len; 397 u16 len;
398 u64 tsf;
399 u32 beacon_time;
380}; 400};
381 401
382/* IEEE 802.11 requires that STA supports concurrent reception of at least 402/* IEEE 802.11 requires that STA supports concurrent reception of at least
@@ -608,6 +628,28 @@ struct ieee80211_auth {
608 struct ieee80211_info_element info_element[0]; 628 struct ieee80211_info_element info_element[0];
609} __attribute__ ((packed)); 629} __attribute__ ((packed));
610 630
631struct ieee80211_channel_switch {
632 u8 id;
633 u8 len;
634 u8 mode;
635 u8 channel;
636 u8 count;
637} __attribute__ ((packed));
638
639struct ieee80211_action {
640 struct ieee80211_hdr_3addr header;
641 u8 category;
642 u8 action;
643 union {
644 struct ieee80211_action_exchange {
645 u8 token;
646 struct ieee80211_info_element info_element[0];
647 } exchange;
648 struct ieee80211_channel_switch channel_switch;
649
650 } format;
651} __attribute__ ((packed));
652
611struct ieee80211_disassoc { 653struct ieee80211_disassoc {
612 struct ieee80211_hdr_3addr header; 654 struct ieee80211_hdr_3addr header;
613 __le16 reason; 655 __le16 reason;
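A minimal sketch of a handle_action callback built on the structures above; the channel-switch helper is hypothetical, and the callback signature matches the handler added to struct ieee80211_device later in this patch:

	static int example_handle_action(struct net_device *dev,
					 struct ieee80211_action *action,
					 struct ieee80211_rx_stats *stats)
	{
		/* Categories 1-127 are reserved, 128-255 flag an error. */
		if (action->category != WLAN_ACTION_SPECTRUM_MGMT)
			return 0;

		switch (action->action) {
		case WLAN_ACTION_CATEGORY_CHANNEL_SWITCH:
			/* mode/channel/count come straight from the element. */
			return example_switch_channel(dev,
					action->format.channel_switch.channel,
					action->format.channel_switch.count);
		case WLAN_ACTION_CATEGORY_MEASURE_REQUEST:
			/* format.exchange.token identifies the request when
			 * building the matching report. */
			return 0;
		default:
			return 0;
		}
	}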
@@ -692,7 +734,15 @@ struct ieee80211_txb {
692/* QoS structure */ 734/* QoS structure */
693#define NETWORK_HAS_QOS_PARAMETERS (1<<3) 735#define NETWORK_HAS_QOS_PARAMETERS (1<<3)
694#define NETWORK_HAS_QOS_INFORMATION (1<<4) 736#define NETWORK_HAS_QOS_INFORMATION (1<<4)
695#define NETWORK_HAS_QOS_MASK (NETWORK_HAS_QOS_PARAMETERS | NETWORK_HAS_QOS_INFORMATION) 737#define NETWORK_HAS_QOS_MASK (NETWORK_HAS_QOS_PARAMETERS | \
738 NETWORK_HAS_QOS_INFORMATION)
739
740/* 802.11h */
741#define NETWORK_HAS_POWER_CONSTRAINT (1<<5)
742#define NETWORK_HAS_CSA (1<<6)
743#define NETWORK_HAS_QUIET (1<<7)
744#define NETWORK_HAS_IBSS_DFS (1<<8)
745#define NETWORK_HAS_TPC_REPORT (1<<9)
696 746
697#define QOS_QUEUE_NUM 4 747#define QOS_QUEUE_NUM 4
698#define QOS_OUI_LEN 3 748#define QOS_OUI_LEN 3
@@ -748,6 +798,91 @@ struct ieee80211_tim_parameters {
748 798
749/*******************************************************/ 799/*******************************************************/
750 800
801enum { /* ieee80211_basic_report.map */
802 IEEE80211_BASIC_MAP_BSS = (1 << 0),
803 IEEE80211_BASIC_MAP_OFDM = (1 << 1),
804 IEEE80211_BASIC_MAP_UNIDENTIFIED = (1 << 2),
805 IEEE80211_BASIC_MAP_RADAR = (1 << 3),
806 IEEE80211_BASIC_MAP_UNMEASURED = (1 << 4),
807 /* Bits 5-7 are reserved */
808
809};
810struct ieee80211_basic_report {
811 u8 channel;
812 __le64 start_time;
813 __le16 duration;
814 u8 map;
815} __attribute__ ((packed));
816
817enum { /* ieee80211_measurement_request.mode */
818 /* Bit 0 is reserved */
819 IEEE80211_MEASUREMENT_ENABLE = (1 << 1),
820 IEEE80211_MEASUREMENT_REQUEST = (1 << 2),
821 IEEE80211_MEASUREMENT_REPORT = (1 << 3),
822 /* Bits 4-7 are reserved */
823};
824
825enum {
826 IEEE80211_REPORT_BASIC = 0, /* required */
827 IEEE80211_REPORT_CCA = 1, /* optional */
828 IEEE80211_REPORT_RPI = 2, /* optional */
829 /* 3-255 reserved */
830};
831
832struct ieee80211_measurement_params {
833 u8 channel;
834 __le64 start_time;
835 __le16 duration;
836} __attribute__ ((packed));
837
838struct ieee80211_measurement_request {
839 struct ieee80211_info_element ie;
840 u8 token;
841 u8 mode;
842 u8 type;
843 struct ieee80211_measurement_params params[0];
844} __attribute__ ((packed));
845
846struct ieee80211_measurement_report {
847 struct ieee80211_info_element ie;
848 u8 token;
849 u8 mode;
850 u8 type;
851 union {
852 struct ieee80211_basic_report basic[0];
853 } u;
854} __attribute__ ((packed));
855
856struct ieee80211_tpc_report {
857 u8 transmit_power;
858 u8 link_margin;
859} __attribute__ ((packed));
860
861struct ieee80211_channel_map {
862 u8 channel;
863 u8 map;
864} __attribute__ ((packed));
865
866struct ieee80211_ibss_dfs {
867 struct ieee80211_info_element ie;
868 u8 owner[ETH_ALEN];
869 u8 recovery_interval;
870 struct ieee80211_channel_map channel_map[0];
871};
872
873struct ieee80211_csa {
874 u8 mode;
875 u8 channel;
876 u8 count;
877} __attribute__ ((packed));
878
879struct ieee80211_quiet {
880 u8 count;
881 u8 period;
882 u8 duration;
883 u8 offset;
884} __attribute__ ((packed));
885
751struct ieee80211_network { 886struct ieee80211_network {
752 /* These entries are used to identify a unique network */ 887 /* These entries are used to identify a unique network */
753 u8 bssid[ETH_ALEN]; 888 u8 bssid[ETH_ALEN];
@@ -767,7 +902,7 @@ struct ieee80211_network {
767 u8 rates_ex_len; 902 u8 rates_ex_len;
768 unsigned long last_scanned; 903 unsigned long last_scanned;
769 u8 mode; 904 u8 mode;
770 u8 flags; 905 u32 flags;
771 u32 last_associate; 906 u32 last_associate;
772 u32 time_stamp[2]; 907 u32 time_stamp[2];
773 u16 beacon_interval; 908 u16 beacon_interval;
@@ -779,6 +914,25 @@ struct ieee80211_network {
779 u8 rsn_ie[MAX_WPA_IE_LEN]; 914 u8 rsn_ie[MAX_WPA_IE_LEN];
780 size_t rsn_ie_len; 915 size_t rsn_ie_len;
781 struct ieee80211_tim_parameters tim; 916 struct ieee80211_tim_parameters tim;
917
918 /* 802.11h info */
919
920 /* Power Constraint - mandatory if spctrm mgmt required */
921 u8 power_constraint;
922
923 /* TPC Report - mandatory if spctrm mgmt required */
924 struct ieee80211_tpc_report tpc_report;
925
926 /* IBSS DFS - mandatory if spctrm mgmt required and IBSS
927 * NOTE: This is variable length and so must be allocated dynamically */
928 struct ieee80211_ibss_dfs *ibss_dfs;
929
930 /* Channel Switch Announcement - optional if spctrm mgmt required */
931 struct ieee80211_csa csa;
932
933 /* Quiet - optional if spctrm mgmt required */
934 struct ieee80211_quiet quiet;
935
782 struct list_head list; 936 struct list_head list;
783}; 937};
784 938
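A sketch of how a driver could fold these fields into its transmit power decision; the regulatory maximum parameter is hypothetical and only fields added by this patch are touched:

	static int example_tx_power_limit(const struct ieee80211_network *net,
					  int regulatory_max_dbm)
	{
		int limit = regulatory_max_dbm;

		/* The Power Constraint element lowers the regulatory
		 * maximum for this BSS. */
		if (net->flags & NETWORK_HAS_POWER_CONSTRAINT)
			limit -= net->power_constraint;

		/* The TPC Report advertises what the AP itself transmits
		 * with; clamping to it is one reasonable policy. */
		if ((net->flags & NETWORK_HAS_TPC_REPORT) &&
		    net->tpc_report.transmit_power < limit)
			limit = net->tpc_report.transmit_power;

		return limit;
	}

The widening of flags from u8 to u32 in the hunk above is what makes room for the new NETWORK_HAS_* bits, two of which sit above bit 7.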
@@ -924,7 +1078,10 @@ struct ieee80211_device {
924 int (*handle_auth) (struct net_device * dev, 1078 int (*handle_auth) (struct net_device * dev,
925 struct ieee80211_auth * auth); 1079 struct ieee80211_auth * auth);
926 int (*handle_deauth) (struct net_device * dev, 1080 int (*handle_deauth) (struct net_device * dev,
927 struct ieee80211_auth * auth); 1081 struct ieee80211_deauth * auth);
1082 int (*handle_action) (struct net_device * dev,
1083 struct ieee80211_action * action,
1084 struct ieee80211_rx_stats * stats);
928 int (*handle_disassoc) (struct net_device * dev, 1085 int (*handle_disassoc) (struct net_device * dev,
929 struct ieee80211_disassoc * assoc); 1086 struct ieee80211_disassoc * assoc);
930 int (*handle_beacon) (struct net_device * dev, 1087 int (*handle_beacon) (struct net_device * dev,
@@ -1093,6 +1250,7 @@ extern int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
1093extern void ieee80211_rx_mgt(struct ieee80211_device *ieee, 1250extern void ieee80211_rx_mgt(struct ieee80211_device *ieee,
1094 struct ieee80211_hdr_4addr *header, 1251 struct ieee80211_hdr_4addr *header,
1095 struct ieee80211_rx_stats *stats); 1252 struct ieee80211_rx_stats *stats);
1253extern void ieee80211_network_reset(struct ieee80211_network *network);
1096 1254
1097/* ieee80211_geo.c */ 1255/* ieee80211_geo.c */
1098extern const struct ieee80211_geo *ieee80211_get_geo(struct ieee80211_device 1256extern const struct ieee80211_geo *ieee80211_get_geo(struct ieee80211_device
@@ -1105,6 +1263,11 @@ extern int ieee80211_is_valid_channel(struct ieee80211_device *ieee,
1105extern int ieee80211_channel_to_index(struct ieee80211_device *ieee, 1263extern int ieee80211_channel_to_index(struct ieee80211_device *ieee,
1106 u8 channel); 1264 u8 channel);
1107extern u8 ieee80211_freq_to_channel(struct ieee80211_device *ieee, u32 freq); 1265extern u8 ieee80211_freq_to_channel(struct ieee80211_device *ieee, u32 freq);
1266extern u8 ieee80211_get_channel_flags(struct ieee80211_device *ieee,
1267 u8 channel);
1268extern const struct ieee80211_channel *ieee80211_get_channel(struct
1269 ieee80211_device
1270 *ieee, u8 channel);
1108 1271
1109/* ieee80211_wx.c */ 1272/* ieee80211_wx.c */
1110extern int ieee80211_wx_get_scan(struct ieee80211_device *ieee, 1273extern int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
@@ -1122,6 +1285,14 @@ extern int ieee80211_wx_set_encodeext(struct ieee80211_device *ieee,
1122extern int ieee80211_wx_get_encodeext(struct ieee80211_device *ieee, 1285extern int ieee80211_wx_get_encodeext(struct ieee80211_device *ieee,
1123 struct iw_request_info *info, 1286 struct iw_request_info *info,
1124 union iwreq_data *wrqu, char *extra); 1287 union iwreq_data *wrqu, char *extra);
1288extern int ieee80211_wx_set_auth(struct net_device *dev,
1289 struct iw_request_info *info,
1290 union iwreq_data *wrqu,
1291 char *extra);
1292extern int ieee80211_wx_get_auth(struct net_device *dev,
1293 struct iw_request_info *info,
1294 union iwreq_data *wrqu,
1295 char *extra);
1125 1296
1126static inline void ieee80211_increment_scans(struct ieee80211_device *ieee) 1297static inline void ieee80211_increment_scans(struct ieee80211_device *ieee)
1127{ 1298{
diff --git a/include/net/ieee80211_crypt.h b/include/net/ieee80211_crypt.h
index cd82c3e998e4..eb476414fd72 100644
--- a/include/net/ieee80211_crypt.h
+++ b/include/net/ieee80211_crypt.h
@@ -47,7 +47,8 @@ struct ieee80211_crypto_ops {
47 /* deinitialize crypto context and free allocated private data */ 47 /* deinitialize crypto context and free allocated private data */
48 void (*deinit) (void *priv); 48 void (*deinit) (void *priv);
49 49
50 int (*build_iv) (struct sk_buff * skb, int hdr_len, void *priv); 50 int (*build_iv) (struct sk_buff * skb, int hdr_len,
51 u8 *key, int keylen, void *priv);
51 52
52 /* encrypt/decrypt return < 0 on error or >= 0 on success. The return 53 /* encrypt/decrypt return < 0 on error or >= 0 on success. The return
53 * value from decrypt_mpdu is passed as the keyidx value for 54 * value from decrypt_mpdu is passed as the keyidx value for
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index fabd879c2f2e..d160880b2a87 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -35,6 +35,9 @@ static inline int scsi_sense_valid(struct scsi_sense_hdr *sshdr)
35} 35}
36 36
37 37
38extern void scsi_eh_finish_cmd(struct scsi_cmnd *scmd,
39 struct list_head *done_q);
40extern void scsi_eh_flush_done_q(struct list_head *done_q);
38extern void scsi_report_bus_reset(struct Scsi_Host *, int); 41extern void scsi_report_bus_reset(struct Scsi_Host *, int);
39extern void scsi_report_device_reset(struct Scsi_Host *, int, int); 42extern void scsi_report_device_reset(struct Scsi_Host *, int, int);
40extern int scsi_block_when_processing_errors(struct scsi_device *); 43extern int scsi_block_when_processing_errors(struct scsi_device *);
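A rough sketch of how a host driver's error handler might use the newly exported helpers; the recovery check is a hypothetical stand-in and the list walk only approximates the pattern used by the SCSI midlayer:

	static int example_eh_strategy(struct Scsi_Host *host,
				       struct list_head *work_q)
	{
		struct scsi_cmnd *scmd, *next;
		LIST_HEAD(done_q);

		list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
			if (example_cmd_recovered(scmd))
				/* Move the command onto the local done list. */
				scsi_eh_finish_cmd(scmd, &done_q);
		}

		/* Hand everything collected back to the midlayer in one pass. */
		scsi_eh_flush_done_q(&done_q);
		return 0;
	}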
diff --git a/kernel/exit.c b/kernel/exit.c
index 531aadca5530..d1e8d500a7e1 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -807,8 +807,6 @@ fastcall NORET_TYPE void do_exit(long code)
807 panic("Attempted to kill the idle task!"); 807 panic("Attempted to kill the idle task!");
808 if (unlikely(tsk->pid == 1)) 808 if (unlikely(tsk->pid == 1))
809 panic("Attempted to kill init!"); 809 panic("Attempted to kill init!");
810 if (tsk->io_context)
811 exit_io_context();
812 810
813 if (unlikely(current->ptrace & PT_TRACE_EXIT)) { 811 if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
814 current->ptrace_message = code; 812 current->ptrace_message = code;
@@ -822,6 +820,8 @@ fastcall NORET_TYPE void do_exit(long code)
822 if (unlikely(tsk->flags & PF_EXITING)) { 820 if (unlikely(tsk->flags & PF_EXITING)) {
823 printk(KERN_ALERT 821 printk(KERN_ALERT
824 "Fixing recursive fault but reboot is needed!\n"); 822 "Fixing recursive fault but reboot is needed!\n");
823 if (tsk->io_context)
824 exit_io_context();
825 set_current_state(TASK_UNINTERRUPTIBLE); 825 set_current_state(TASK_UNINTERRUPTIBLE);
826 schedule(); 826 schedule();
827 } 827 }
@@ -881,6 +881,9 @@ fastcall NORET_TYPE void do_exit(long code)
881 */ 881 */
882 mutex_debug_check_no_locks_held(tsk); 882 mutex_debug_check_no_locks_held(tsk);
883 883
884 if (tsk->io_context)
885 exit_io_context();
886
884 /* PF_DEAD causes final put_task_struct after we schedule. */ 887 /* PF_DEAD causes final put_task_struct after we schedule. */
885 preempt_disable(); 888 preempt_disable();
886 BUG_ON(tsk->flags & PF_DEAD); 889 BUG_ON(tsk->flags & PF_DEAD);
diff --git a/net/Kconfig b/net/Kconfig
index 5126f58d9c44..4193cdcd3ae7 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -224,6 +224,9 @@ source "net/irda/Kconfig"
224source "net/bluetooth/Kconfig" 224source "net/bluetooth/Kconfig"
225source "net/ieee80211/Kconfig" 225source "net/ieee80211/Kconfig"
226 226
227config WIRELESS_EXT
228 bool
229
227endif # if NET 230endif # if NET
228endmenu # Networking 231endmenu # Networking
229 232
diff --git a/net/core/Makefile b/net/core/Makefile
index 630da0f0579e..79fe12cced27 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -14,5 +14,5 @@ obj-$(CONFIG_XFRM) += flow.o
14obj-$(CONFIG_SYSFS) += net-sysfs.o 14obj-$(CONFIG_SYSFS) += net-sysfs.o
15obj-$(CONFIG_NET_DIVERT) += dv.o 15obj-$(CONFIG_NET_DIVERT) += dv.o
16obj-$(CONFIG_NET_PKTGEN) += pktgen.o 16obj-$(CONFIG_NET_PKTGEN) += pktgen.o
17obj-$(CONFIG_NET_RADIO) += wireless.o 17obj-$(CONFIG_WIRELESS_EXT) += wireless.o
18obj-$(CONFIG_NETPOLL) += netpoll.o 18obj-$(CONFIG_NETPOLL) += netpoll.o
diff --git a/net/core/dev.c b/net/core/dev.c
index 2afb0de95329..ef56c035d44e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -110,10 +110,8 @@
110#include <linux/netpoll.h> 110#include <linux/netpoll.h>
111#include <linux/rcupdate.h> 111#include <linux/rcupdate.h>
112#include <linux/delay.h> 112#include <linux/delay.h>
113#ifdef CONFIG_NET_RADIO 113#include <linux/wireless.h>
114#include <linux/wireless.h> /* Note : will define WIRELESS_EXT */
115#include <net/iw_handler.h> 114#include <net/iw_handler.h>
116#endif /* CONFIG_NET_RADIO */
117#include <asm/current.h> 115#include <asm/current.h>
118 116
119/* 117/*
@@ -1448,8 +1446,29 @@ static inline struct net_device *skb_bond(struct sk_buff *skb)
1448{ 1446{
1449 struct net_device *dev = skb->dev; 1447 struct net_device *dev = skb->dev;
1450 1448
1451 if (dev->master) 1449 if (dev->master) {
1450 /*
1451 * On bonding slaves other than the currently active
1452 * slave, suppress duplicates except for 802.3ad
1453 * ETH_P_SLOW and alb non-mcast/bcast.
1454 */
1455 if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
1456 if (dev->master->priv_flags & IFF_MASTER_ALB) {
1457 if (skb->pkt_type != PACKET_BROADCAST &&
1458 skb->pkt_type != PACKET_MULTICAST)
1459 goto keep;
1460 }
1461
1462 if (dev->master->priv_flags & IFF_MASTER_8023AD &&
1463 skb->protocol == __constant_htons(ETH_P_SLOW))
1464 goto keep;
1465
1466 kfree_skb(skb);
1467 return NULL;
1468 }
1469keep:
1452 skb->dev = dev->master; 1470 skb->dev = dev->master;
1471 }
1453 1472
1454 return dev; 1473 return dev;
1455} 1474}
@@ -1593,6 +1612,9 @@ int netif_receive_skb(struct sk_buff *skb)
1593 1612
1594 orig_dev = skb_bond(skb); 1613 orig_dev = skb_bond(skb);
1595 1614
1615 if (!orig_dev)
1616 return NET_RX_DROP;
1617
1596 __get_cpu_var(netdev_rx_stat).total++; 1618 __get_cpu_var(netdev_rx_stat).total++;
1597 1619
1598 skb->h.raw = skb->nh.raw = skb->data; 1620 skb->h.raw = skb->nh.raw = skb->data;
@@ -2028,7 +2050,7 @@ static struct file_operations softnet_seq_fops = {
2028 .release = seq_release, 2050 .release = seq_release,
2029}; 2051};
2030 2052
2031#ifdef WIRELESS_EXT 2053#ifdef CONFIG_WIRELESS_EXT
2032extern int wireless_proc_init(void); 2054extern int wireless_proc_init(void);
2033#else 2055#else
2034#define wireless_proc_init() 0 2056#define wireless_proc_init() 0
@@ -2582,7 +2604,7 @@ int dev_ioctl(unsigned int cmd, void __user *arg)
2582 ret = -EFAULT; 2604 ret = -EFAULT;
2583 return ret; 2605 return ret;
2584 } 2606 }
2585#ifdef WIRELESS_EXT 2607#ifdef CONFIG_WIRELESS_EXT
2586 /* Take care of Wireless Extensions */ 2608 /* Take care of Wireless Extensions */
2587 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) { 2609 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
2588 /* If command is `set a parameter', or 2610 /* If command is `set a parameter', or
@@ -2603,7 +2625,7 @@ int dev_ioctl(unsigned int cmd, void __user *arg)
2603 ret = -EFAULT; 2625 ret = -EFAULT;
2604 return ret; 2626 return ret;
2605 } 2627 }
2606#endif /* WIRELESS_EXT */ 2628#endif /* CONFIG_WIRELESS_EXT */
2607 return -EINVAL; 2629 return -EINVAL;
2608 } 2630 }
2609} 2631}
diff --git a/net/ieee80211/ieee80211_crypt.c b/net/ieee80211/ieee80211_crypt.c
index ecc9bb196abc..cb71d794a7d1 100644
--- a/net/ieee80211/ieee80211_crypt.c
+++ b/net/ieee80211/ieee80211_crypt.c
@@ -18,7 +18,6 @@
18#include <linux/string.h> 18#include <linux/string.h>
19#include <net/ieee80211.h> 19#include <net/ieee80211.h>
20 20
21
22MODULE_AUTHOR("Jouni Malinen"); 21MODULE_AUTHOR("Jouni Malinen");
23MODULE_DESCRIPTION("HostAP crypto"); 22MODULE_DESCRIPTION("HostAP crypto");
24MODULE_LICENSE("GPL"); 23MODULE_LICENSE("GPL");
@@ -33,11 +32,11 @@ static DEFINE_SPINLOCK(ieee80211_crypto_lock);
33 32
34void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee, int force) 33void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee, int force)
35{ 34{
36 struct ieee80211_crypt_data *entry, *next; 35 struct ieee80211_crypt_data *entry, *next;
37 unsigned long flags; 36 unsigned long flags;
38 37
39 spin_lock_irqsave(&ieee->lock, flags); 38 spin_lock_irqsave(&ieee->lock, flags);
40 list_for_each_entry_safe(entry, next, &ieee->crypt_deinit_list, list) { 39 list_for_each_entry_safe(entry, next, &ieee->crypt_deinit_list, list) {
41 if (atomic_read(&entry->refcnt) != 0 && !force) 40 if (atomic_read(&entry->refcnt) != 0 && !force)
42 continue; 41 continue;
43 42
@@ -141,9 +140,9 @@ int ieee80211_unregister_crypto_ops(struct ieee80211_crypto_ops *ops)
141 spin_unlock_irqrestore(&ieee80211_crypto_lock, flags); 140 spin_unlock_irqrestore(&ieee80211_crypto_lock, flags);
142 return -EINVAL; 141 return -EINVAL;
143 142
144 found: 143 found:
145 printk(KERN_DEBUG "ieee80211_crypt: unregistered algorithm " 144 printk(KERN_DEBUG "ieee80211_crypt: unregistered algorithm "
146 "'%s'\n", ops->name); 145 "'%s'\n", ops->name);
147 list_del(&alg->list); 146 list_del(&alg->list);
148 spin_unlock_irqrestore(&ieee80211_crypto_lock, flags); 147 spin_unlock_irqrestore(&ieee80211_crypto_lock, flags);
149 kfree(alg); 148 kfree(alg);
@@ -163,7 +162,7 @@ struct ieee80211_crypto_ops *ieee80211_get_crypto_ops(const char *name)
163 spin_unlock_irqrestore(&ieee80211_crypto_lock, flags); 162 spin_unlock_irqrestore(&ieee80211_crypto_lock, flags);
164 return NULL; 163 return NULL;
165 164
166 found: 165 found:
167 spin_unlock_irqrestore(&ieee80211_crypto_lock, flags); 166 spin_unlock_irqrestore(&ieee80211_crypto_lock, flags);
168 return alg->ops; 167 return alg->ops;
169} 168}
diff --git a/net/ieee80211/ieee80211_crypt_ccmp.c b/net/ieee80211/ieee80211_crypt_ccmp.c
index 3840d1911f2b..78b2d13e80e3 100644
--- a/net/ieee80211/ieee80211_crypt_ccmp.c
+++ b/net/ieee80211/ieee80211_crypt_ccmp.c
@@ -190,7 +190,8 @@ static void ccmp_init_blocks(struct crypto_tfm *tfm,
190 ieee80211_ccmp_aes_encrypt(tfm, b0, s0); 190 ieee80211_ccmp_aes_encrypt(tfm, b0, s0);
191} 191}
192 192
193static int ieee80211_ccmp_hdr(struct sk_buff *skb, int hdr_len, void *priv) 193static int ieee80211_ccmp_hdr(struct sk_buff *skb, int hdr_len,
194 u8 *aeskey, int keylen, void *priv)
194{ 195{
195 struct ieee80211_ccmp_data *key = priv; 196 struct ieee80211_ccmp_data *key = priv;
196 int i; 197 int i;
@@ -199,6 +200,9 @@ static int ieee80211_ccmp_hdr(struct sk_buff *skb, int hdr_len, void *priv)
199 if (skb_headroom(skb) < CCMP_HDR_LEN || skb->len < hdr_len) 200 if (skb_headroom(skb) < CCMP_HDR_LEN || skb->len < hdr_len)
200 return -1; 201 return -1;
201 202
203 if (aeskey != NULL && keylen >= CCMP_TK_LEN)
204 memcpy(aeskey, key->key, CCMP_TK_LEN);
205
202 pos = skb_push(skb, CCMP_HDR_LEN); 206 pos = skb_push(skb, CCMP_HDR_LEN);
203 memmove(pos, pos + CCMP_HDR_LEN, hdr_len); 207 memmove(pos, pos + CCMP_HDR_LEN, hdr_len);
204 pos += hdr_len; 208 pos += hdr_len;
@@ -238,7 +242,7 @@ static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
238 return -1; 242 return -1;
239 243
240 data_len = skb->len - hdr_len; 244 data_len = skb->len - hdr_len;
241 len = ieee80211_ccmp_hdr(skb, hdr_len, priv); 245 len = ieee80211_ccmp_hdr(skb, hdr_len, NULL, 0, priv);
242 if (len < 0) 246 if (len < 0)
243 return -1; 247 return -1;
244 248
diff --git a/net/ieee80211/ieee80211_crypt_tkip.c b/net/ieee80211/ieee80211_crypt_tkip.c
index e0988320efbf..93def94c1b32 100644
--- a/net/ieee80211/ieee80211_crypt_tkip.c
+++ b/net/ieee80211/ieee80211_crypt_tkip.c
@@ -80,10 +80,9 @@ static void *ieee80211_tkip_init(int key_idx)
80{ 80{
81 struct ieee80211_tkip_data *priv; 81 struct ieee80211_tkip_data *priv;
82 82
83 priv = kmalloc(sizeof(*priv), GFP_ATOMIC); 83 priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
84 if (priv == NULL) 84 if (priv == NULL)
85 goto fail; 85 goto fail;
86 memset(priv, 0, sizeof(*priv));
87 86
88 priv->key_idx = key_idx; 87 priv->key_idx = key_idx;
89 88
@@ -271,34 +270,33 @@ static void tkip_mixing_phase2(u8 * WEPSeed, const u8 * TK, const u16 * TTAK,
271#endif 270#endif
272} 271}
273 272
274static u8 *ieee80211_tkip_hdr(struct sk_buff *skb, int hdr_len, void *priv) 273static int ieee80211_tkip_hdr(struct sk_buff *skb, int hdr_len,
274 u8 * rc4key, int keylen, void *priv)
275{ 275{
276 struct ieee80211_tkip_data *tkey = priv; 276 struct ieee80211_tkip_data *tkey = priv;
277 int len; 277 int len;
278 u8 *rc4key, *pos, *icv; 278 u8 *pos;
279 struct ieee80211_hdr_4addr *hdr; 279 struct ieee80211_hdr_4addr *hdr;
280 u32 crc;
281 280
282 hdr = (struct ieee80211_hdr_4addr *)skb->data; 281 hdr = (struct ieee80211_hdr_4addr *)skb->data;
283 282
284 if (skb_headroom(skb) < 8 || skb->len < hdr_len) 283 if (skb_headroom(skb) < 8 || skb->len < hdr_len)
285 return NULL; 284 return -1;
285
286 if (rc4key == NULL || keylen < 16)
287 return -1;
286 288
287 if (!tkey->tx_phase1_done) { 289 if (!tkey->tx_phase1_done) {
288 tkip_mixing_phase1(tkey->tx_ttak, tkey->key, hdr->addr2, 290 tkip_mixing_phase1(tkey->tx_ttak, tkey->key, hdr->addr2,
289 tkey->tx_iv32); 291 tkey->tx_iv32);
290 tkey->tx_phase1_done = 1; 292 tkey->tx_phase1_done = 1;
291 } 293 }
292 rc4key = kmalloc(16, GFP_ATOMIC);
293 if (!rc4key)
294 return NULL;
295 tkip_mixing_phase2(rc4key, tkey->key, tkey->tx_ttak, tkey->tx_iv16); 294 tkip_mixing_phase2(rc4key, tkey->key, tkey->tx_ttak, tkey->tx_iv16);
296 295
297 len = skb->len - hdr_len; 296 len = skb->len - hdr_len;
298 pos = skb_push(skb, 8); 297 pos = skb_push(skb, 8);
299 memmove(pos, pos + 8, hdr_len); 298 memmove(pos, pos + 8, hdr_len);
300 pos += hdr_len; 299 pos += hdr_len;
301 icv = skb_put(skb, 4);
302 300
303 *pos++ = *rc4key; 301 *pos++ = *rc4key;
304 *pos++ = *(rc4key + 1); 302 *pos++ = *(rc4key + 1);
@@ -309,28 +307,28 @@ static u8 *ieee80211_tkip_hdr(struct sk_buff *skb, int hdr_len, void *priv)
309 *pos++ = (tkey->tx_iv32 >> 16) & 0xff; 307 *pos++ = (tkey->tx_iv32 >> 16) & 0xff;
310 *pos++ = (tkey->tx_iv32 >> 24) & 0xff; 308 *pos++ = (tkey->tx_iv32 >> 24) & 0xff;
311 309
312 crc = ~crc32_le(~0, pos, len); 310 tkey->tx_iv16++;
313 icv[0] = crc; 311 if (tkey->tx_iv16 == 0) {
314 icv[1] = crc >> 8; 312 tkey->tx_phase1_done = 0;
315 icv[2] = crc >> 16; 313 tkey->tx_iv32++;
316 icv[3] = crc >> 24; 314 }
317 315
318 return rc4key; 316 return 8;
319} 317}
320 318
321static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) 319static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
322{ 320{
323 struct ieee80211_tkip_data *tkey = priv; 321 struct ieee80211_tkip_data *tkey = priv;
324 int len; 322 int len;
325 const u8 *rc4key; 323 u8 rc4key[16], *pos, *icv;
326 u8 *pos; 324 u32 crc;
327 struct scatterlist sg; 325 struct scatterlist sg;
328 326
329 if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) { 327 if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) {
330 if (net_ratelimit()) { 328 if (net_ratelimit()) {
331 struct ieee80211_hdr_4addr *hdr = 329 struct ieee80211_hdr_4addr *hdr =
332 (struct ieee80211_hdr_4addr *)skb->data; 330 (struct ieee80211_hdr_4addr *)skb->data;
333 printk(KERN_DEBUG "TKIP countermeasures: dropped " 331 printk(KERN_DEBUG ": TKIP countermeasures: dropped "
334 "TX packet to " MAC_FMT "\n", 332 "TX packet to " MAC_FMT "\n",
335 MAC_ARG(hdr->addr1)); 333 MAC_ARG(hdr->addr1));
336 } 334 }
@@ -343,22 +341,23 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
343 len = skb->len - hdr_len; 341 len = skb->len - hdr_len;
344 pos = skb->data + hdr_len; 342 pos = skb->data + hdr_len;
345 343
346 rc4key = ieee80211_tkip_hdr(skb, hdr_len, priv); 344 if ((ieee80211_tkip_hdr(skb, hdr_len, rc4key, 16, priv)) < 0)
347 if (!rc4key)
348 return -1; 345 return -1;
349 346
347 icv = skb_put(skb, 4);
348
349 crc = ~crc32_le(~0, pos, len);
350 icv[0] = crc;
351 icv[1] = crc >> 8;
352 icv[2] = crc >> 16;
353 icv[3] = crc >> 24;
354
350 crypto_cipher_setkey(tkey->tfm_arc4, rc4key, 16); 355 crypto_cipher_setkey(tkey->tfm_arc4, rc4key, 16);
351 sg.page = virt_to_page(pos); 356 sg.page = virt_to_page(pos);
352 sg.offset = offset_in_page(pos); 357 sg.offset = offset_in_page(pos);
353 sg.length = len + 4; 358 sg.length = len + 4;
354 crypto_cipher_encrypt(tkey->tfm_arc4, &sg, &sg, len + 4); 359 crypto_cipher_encrypt(tkey->tfm_arc4, &sg, &sg, len + 4);
355 360
356 tkey->tx_iv16++;
357 if (tkey->tx_iv16 == 0) {
358 tkey->tx_phase1_done = 0;
359 tkey->tx_iv32++;
360 }
361
362 return 0; 361 return 0;
363} 362}
364 363
@@ -379,7 +378,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
379 378
380 if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) { 379 if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) {
381 if (net_ratelimit()) { 380 if (net_ratelimit()) {
382 printk(KERN_DEBUG "TKIP countermeasures: dropped " 381 printk(KERN_DEBUG ": TKIP countermeasures: dropped "
383 "received packet from " MAC_FMT "\n", 382 "received packet from " MAC_FMT "\n",
384 MAC_ARG(hdr->addr2)); 383 MAC_ARG(hdr->addr2));
385 } 384 }
@@ -695,6 +694,7 @@ static struct ieee80211_crypto_ops ieee80211_crypt_tkip = {
695 .name = "TKIP", 694 .name = "TKIP",
696 .init = ieee80211_tkip_init, 695 .init = ieee80211_tkip_init,
697 .deinit = ieee80211_tkip_deinit, 696 .deinit = ieee80211_tkip_deinit,
697 .build_iv = ieee80211_tkip_hdr,
698 .encrypt_mpdu = ieee80211_tkip_encrypt, 698 .encrypt_mpdu = ieee80211_tkip_encrypt,
699 .decrypt_mpdu = ieee80211_tkip_decrypt, 699 .decrypt_mpdu = ieee80211_tkip_decrypt,
700 .encrypt_msdu = ieee80211_michael_mic_add, 700 .encrypt_msdu = ieee80211_michael_mic_add,
diff --git a/net/ieee80211/ieee80211_crypt_wep.c b/net/ieee80211/ieee80211_crypt_wep.c
index f8dca31be5dd..649e581fa565 100644
--- a/net/ieee80211/ieee80211_crypt_wep.c
+++ b/net/ieee80211/ieee80211_crypt_wep.c
@@ -76,7 +76,8 @@ static void prism2_wep_deinit(void *priv)
76} 76}
77 77
78/* Add WEP IV/key info to a frame that has at least 4 bytes of headroom */ 78/* Add WEP IV/key info to a frame that has at least 4 bytes of headroom */
79static int prism2_wep_build_iv(struct sk_buff *skb, int hdr_len, void *priv) 79static int prism2_wep_build_iv(struct sk_buff *skb, int hdr_len,
80 u8 *key, int keylen, void *priv)
80{ 81{
81 struct prism2_wep_data *wep = priv; 82 struct prism2_wep_data *wep = priv;
82 u32 klen, len; 83 u32 klen, len;
@@ -131,7 +132,7 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
131 return -1; 132 return -1;
132 133
133 /* add the IV to the frame */ 134 /* add the IV to the frame */
134 if (prism2_wep_build_iv(skb, hdr_len, priv)) 135 if (prism2_wep_build_iv(skb, hdr_len, NULL, 0, priv))
135 return -1; 136 return -1;
136 137
137 /* Copy the IV into the first 3 bytes of the key */ 138 /* Copy the IV into the first 3 bytes of the key */
diff --git a/net/ieee80211/ieee80211_geo.c b/net/ieee80211/ieee80211_geo.c
index 610cc5cbc252..192243ab35ed 100644
--- a/net/ieee80211/ieee80211_geo.c
+++ b/net/ieee80211/ieee80211_geo.c
@@ -50,7 +50,8 @@ int ieee80211_is_valid_channel(struct ieee80211_device *ieee, u8 channel)
50 50
51 /* Driver needs to initialize the geography map before using 51 /* Driver needs to initialize the geography map before using
52 * these helper functions */ 52 * these helper functions */
53 BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0); 53 if (ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0)
54 return 0;
54 55
55 if (ieee->freq_band & IEEE80211_24GHZ_BAND) 56 if (ieee->freq_band & IEEE80211_24GHZ_BAND)
56 for (i = 0; i < ieee->geo.bg_channels; i++) 57 for (i = 0; i < ieee->geo.bg_channels; i++)
@@ -58,13 +59,15 @@ int ieee80211_is_valid_channel(struct ieee80211_device *ieee, u8 channel)
58 * this is a B only channel, we don't see it 59 * this is a B only channel, we don't see it
59 * as valid. */ 60 * as valid. */
60 if ((ieee->geo.bg[i].channel == channel) && 61 if ((ieee->geo.bg[i].channel == channel) &&
62 !(ieee->geo.bg[i].flags & IEEE80211_CH_INVALID) &&
61 (!(ieee->mode & IEEE_G) || 63 (!(ieee->mode & IEEE_G) ||
62 !(ieee->geo.bg[i].flags & IEEE80211_CH_B_ONLY))) 64 !(ieee->geo.bg[i].flags & IEEE80211_CH_B_ONLY)))
63 return IEEE80211_24GHZ_BAND; 65 return IEEE80211_24GHZ_BAND;
64 66
65 if (ieee->freq_band & IEEE80211_52GHZ_BAND) 67 if (ieee->freq_band & IEEE80211_52GHZ_BAND)
66 for (i = 0; i < ieee->geo.a_channels; i++) 68 for (i = 0; i < ieee->geo.a_channels; i++)
67 if (ieee->geo.a[i].channel == channel) 69 if ((ieee->geo.a[i].channel == channel) &&
70 !(ieee->geo.a[i].flags & IEEE80211_CH_INVALID))
68 return IEEE80211_52GHZ_BAND; 71 return IEEE80211_52GHZ_BAND;
69 72
70 return 0; 73 return 0;
@@ -76,7 +79,8 @@ int ieee80211_channel_to_index(struct ieee80211_device *ieee, u8 channel)
76 79
77 /* Driver needs to initialize the geography map before using 80 /* Driver needs to initialize the geography map before using
78 * these helper functions */ 81 * these helper functions */
79 BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0); 82 if (ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0)
83 return -1;
80 84
81 if (ieee->freq_band & IEEE80211_24GHZ_BAND) 85 if (ieee->freq_band & IEEE80211_24GHZ_BAND)
82 for (i = 0; i < ieee->geo.bg_channels; i++) 86 for (i = 0; i < ieee->geo.bg_channels; i++)
@@ -97,7 +101,8 @@ u8 ieee80211_freq_to_channel(struct ieee80211_device * ieee, u32 freq)
97 101
98 /* Driver needs to initialize the geography map before using 102 /* Driver needs to initialize the geography map before using
99 * these helper functions */ 103 * these helper functions */
100 BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0); 104 if (ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0)
105 return 0;
101 106
102 freq /= 100000; 107 freq /= 100000;
103 108
@@ -133,6 +138,41 @@ const struct ieee80211_geo *ieee80211_get_geo(struct ieee80211_device *ieee)
133 return &ieee->geo; 138 return &ieee->geo;
134} 139}
135 140
141u8 ieee80211_get_channel_flags(struct ieee80211_device * ieee, u8 channel)
142{
143 int index = ieee80211_channel_to_index(ieee, channel);
144
145 if (index == -1)
146 return IEEE80211_CH_INVALID;
147
148 if (channel <= IEEE80211_24GHZ_CHANNELS)
149 return ieee->geo.bg[index].flags;
150
151 return ieee->geo.a[index].flags;
152}
153
154static const struct ieee80211_channel bad_channel = {
155 .channel = 0,
156 .flags = IEEE80211_CH_INVALID,
157 .max_power = 0,
158};
159
160const struct ieee80211_channel *ieee80211_get_channel(struct ieee80211_device
161 *ieee, u8 channel)
162{
163 int index = ieee80211_channel_to_index(ieee, channel);
164
165 if (index == -1)
166 return &bad_channel;
167
168 if (channel <= IEEE80211_24GHZ_CHANNELS)
169 return &ieee->geo.bg[index];
170
171 return &ieee->geo.a[index];
172}
173
174EXPORT_SYMBOL(ieee80211_get_channel);
175EXPORT_SYMBOL(ieee80211_get_channel_flags);
136EXPORT_SYMBOL(ieee80211_is_valid_channel); 176EXPORT_SYMBOL(ieee80211_is_valid_channel);
137EXPORT_SYMBOL(ieee80211_freq_to_channel); 177EXPORT_SYMBOL(ieee80211_freq_to_channel);
138EXPORT_SYMBOL(ieee80211_channel_to_index); 178EXPORT_SYMBOL(ieee80211_channel_to_index);
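For illustration, a validity check a driver might build on the new helpers; only identifiers that appear in this patch are used:

	static int example_channel_usable(struct ieee80211_device *ieee, u8 channel)
	{
		const struct ieee80211_channel *ch =
			ieee80211_get_channel(ieee, channel);

		/* Unknown channels come back as the static bad_channel entry,
		 * whose flags carry IEEE80211_CH_INVALID, so no NULL check
		 * is needed. */
		return !(ch->flags & IEEE80211_CH_INVALID);
	}

Returning a placeholder entry instead of NULL keeps every caller's flag test this simple, which is the point of the bad_channel fallback above.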
diff --git a/net/ieee80211/ieee80211_module.c b/net/ieee80211/ieee80211_module.c
index 90d18b72da3d..2cb84d84f671 100644
--- a/net/ieee80211/ieee80211_module.c
+++ b/net/ieee80211/ieee80211_module.c
@@ -82,10 +82,28 @@ static int ieee80211_networks_allocate(struct ieee80211_device *ieee)
82 return 0; 82 return 0;
83} 83}
84 84
85void ieee80211_network_reset(struct ieee80211_network *network)
86{
87 if (!network)
88 return;
89
90 if (network->ibss_dfs) {
91 kfree(network->ibss_dfs);
92 network->ibss_dfs = NULL;
93 }
94}
95
85static inline void ieee80211_networks_free(struct ieee80211_device *ieee) 96static inline void ieee80211_networks_free(struct ieee80211_device *ieee)
86{ 97{
98 int i;
99
87 if (!ieee->networks) 100 if (!ieee->networks)
88 return; 101 return;
102
103 for (i = 0; i < MAX_NETWORK_COUNT; i++)
104 if (ieee->networks[i].ibss_dfs)
105 kfree(ieee->networks[i].ibss_dfs);
106
89 kfree(ieee->networks); 107 kfree(ieee->networks);
90 ieee->networks = NULL; 108 ieee->networks = NULL;
91} 109}
@@ -195,7 +213,7 @@ void free_ieee80211(struct net_device *dev)
195 213
196static int debug = 0; 214static int debug = 0;
197u32 ieee80211_debug_level = 0; 215u32 ieee80211_debug_level = 0;
198struct proc_dir_entry *ieee80211_proc = NULL; 216static struct proc_dir_entry *ieee80211_proc = NULL;
199 217
200static int show_debug_level(char *page, char **start, off_t offset, 218static int show_debug_level(char *page, char **start, off_t offset,
201 int count, int *eof, void *data) 219 int count, int *eof, void *data)
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c
index 7ac6a7165d9c..a7f2a642a512 100644
--- a/net/ieee80211/ieee80211_rx.c
+++ b/net/ieee80211/ieee80211_rx.c
@@ -369,8 +369,8 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
369 369
370 /* Put this code here so that we avoid duplicating it in all 370 /* Put this code here so that we avoid duplicating it in all
371 * Rx paths. - Jean II */ 371 * Rx paths. - Jean II */
372#ifdef CONFIG_WIRELESS_EXT
372#ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */ 373#ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */
373#ifdef CONFIG_NET_RADIO
374 /* If spy monitoring on */ 374 /* If spy monitoring on */
375 if (ieee->spy_data.spy_number > 0) { 375 if (ieee->spy_data.spy_number > 0) {
376 struct iw_quality wstats; 376 struct iw_quality wstats;
@@ -397,8 +397,8 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
397 /* Update spy records */ 397 /* Update spy records */
398 wireless_spy_update(ieee->dev, hdr->addr2, &wstats); 398 wireless_spy_update(ieee->dev, hdr->addr2, &wstats);
399 } 399 }
400#endif /* CONFIG_NET_RADIO */
401#endif /* IW_WIRELESS_SPY */ 400#endif /* IW_WIRELESS_SPY */
401#endif /* CONFIG_WIRELESS_EXT */
402 402
403#ifdef NOT_YET 403#ifdef NOT_YET
404 hostap_update_rx_stats(local->ap, hdr, rx_stats); 404 hostap_update_rx_stats(local->ap, hdr, rx_stats);
@@ -574,7 +574,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
574 /* skb: hdr + (possibly fragmented) plaintext payload */ 574 /* skb: hdr + (possibly fragmented) plaintext payload */
575 // PR: FIXME: hostap has additional conditions in the "if" below: 575 // PR: FIXME: hostap has additional conditions in the "if" below:
576 // ieee->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) && 576 // ieee->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) &&
577 if ((frag != 0 || (fc & IEEE80211_FCTL_MOREFRAGS))) { 577 if ((frag != 0) || (fc & IEEE80211_FCTL_MOREFRAGS)) {
578 int flen; 578 int flen;
579 struct sk_buff *frag_skb = ieee80211_frag_cache_get(ieee, hdr); 579 struct sk_buff *frag_skb = ieee80211_frag_cache_get(ieee, hdr);
580 IEEE80211_DEBUG_FRAG("Rx Fragment received (%u)\n", frag); 580 IEEE80211_DEBUG_FRAG("Rx Fragment received (%u)\n", frag);
@@ -754,7 +754,14 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
754 memset(skb->cb, 0, sizeof(skb->cb)); 754 memset(skb->cb, 0, sizeof(skb->cb));
755 skb->dev = dev; 755 skb->dev = dev;
756 skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */ 756 skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */
757 netif_rx(skb); 757 if (netif_rx(skb) == NET_RX_DROP) {
758 /* netif_rx always succeeds, but it might drop
759 * the packet. If it drops the packet, we log that
760 * in our stats. */
761 IEEE80211_DEBUG_DROP
762 ("RX: netif_rx dropped the packet\n");
763 stats->rx_dropped++;
764 }
758 } 765 }
759 766
760 rx_exit: 767 rx_exit:
@@ -930,6 +937,45 @@ static int ieee80211_parse_qos_info_param_IE(struct ieee80211_info_element
930 return rc; 937 return rc;
931} 938}
932 939
940#ifdef CONFIG_IEEE80211_DEBUG
941#define MFIE_STRING(x) case MFIE_TYPE_ ##x: return #x
942
943static const char *get_info_element_string(u16 id)
944{
945 switch (id) {
946 MFIE_STRING(SSID);
947 MFIE_STRING(RATES);
948 MFIE_STRING(FH_SET);
949 MFIE_STRING(DS_SET);
950 MFIE_STRING(CF_SET);
951 MFIE_STRING(TIM);
952 MFIE_STRING(IBSS_SET);
953 MFIE_STRING(COUNTRY);
954 MFIE_STRING(HOP_PARAMS);
955 MFIE_STRING(HOP_TABLE);
956 MFIE_STRING(REQUEST);
957 MFIE_STRING(CHALLENGE);
958 MFIE_STRING(POWER_CONSTRAINT);
959 MFIE_STRING(POWER_CAPABILITY);
960 MFIE_STRING(TPC_REQUEST);
961 MFIE_STRING(TPC_REPORT);
962 MFIE_STRING(SUPP_CHANNELS);
963 MFIE_STRING(CSA);
964 MFIE_STRING(MEASURE_REQUEST);
965 MFIE_STRING(MEASURE_REPORT);
966 MFIE_STRING(QUIET);
967 MFIE_STRING(IBSS_DFS);
968 MFIE_STRING(ERP_INFO);
969 MFIE_STRING(RSN);
970 MFIE_STRING(RATES_EX);
971 MFIE_STRING(GENERIC);
972 MFIE_STRING(QOS_PARAMETER);
973 default:
974 return "UNKNOWN";
975 }
976}
977#endif
978
933static int ieee80211_parse_info_param(struct ieee80211_info_element 979static int ieee80211_parse_info_param(struct ieee80211_info_element
934 *info_element, u16 length, 980 *info_element, u16 length,
935 struct ieee80211_network *network) 981 struct ieee80211_network *network)
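For readers unfamiliar with the preprocessor idiom above, one entry of the switch expands as follows (shown as a comment, not additional code):

	/* MFIE_STRING(SSID) expands to:
	 *	case MFIE_TYPE_SSID: return "SSID";
	 * '##' pastes the argument onto the MFIE_TYPE_ prefix and
	 * '#' stringizes it for the return value. */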
@@ -1040,7 +1086,9 @@ static int ieee80211_parse_info_param(struct ieee80211_info_element
1040 break; 1086 break;
1041 1087
1042 case MFIE_TYPE_TIM: 1088 case MFIE_TYPE_TIM:
1043 IEEE80211_DEBUG_MGMT("MFIE_TYPE_TIM: ignored\n"); 1089 network->tim.tim_count = info_element->data[0];
1090 network->tim.tim_period = info_element->data[1];
1091 IEEE80211_DEBUG_MGMT("MFIE_TYPE_TIM: partially ignored\n");
1044 break; 1092 break;
1045 1093
1046 case MFIE_TYPE_ERP_INFO: 1094 case MFIE_TYPE_ERP_INFO:
@@ -1091,10 +1139,49 @@ static int ieee80211_parse_info_param(struct ieee80211_info_element
1091 printk(KERN_ERR 1139 printk(KERN_ERR
1092 "QoS Error need to parse QOS_PARAMETER IE\n"); 1140 "QoS Error need to parse QOS_PARAMETER IE\n");
1093 break; 1141 break;
1142 /* 802.11h */
1143 case MFIE_TYPE_POWER_CONSTRAINT:
1144 network->power_constraint = info_element->data[0];
1145 network->flags |= NETWORK_HAS_POWER_CONSTRAINT;
1146 break;
1147
1148 case MFIE_TYPE_CSA:
1149 network->power_constraint = info_element->data[0];
1150 network->flags |= NETWORK_HAS_CSA;
1151 break;
1152
1153 case MFIE_TYPE_QUIET:
1154 network->quiet.count = info_element->data[0];
1155 network->quiet.period = info_element->data[1];
1156 network->quiet.duration = info_element->data[2];
1157 network->quiet.offset = info_element->data[3];
1158 network->flags |= NETWORK_HAS_QUIET;
1159 break;
1160
1161 case MFIE_TYPE_IBSS_DFS:
1162 if (network->ibss_dfs)
1163 break;
1164 network->ibss_dfs =
1165 kmalloc(info_element->len, GFP_ATOMIC);
1166 if (!network->ibss_dfs)
1167 return 1;
1168 memcpy(network->ibss_dfs, info_element->data,
1169 info_element->len);
1170 network->flags |= NETWORK_HAS_IBSS_DFS;
1171 break;
1172
1173 case MFIE_TYPE_TPC_REPORT:
1174 network->tpc_report.transmit_power =
1175 info_element->data[0];
1176 network->tpc_report.link_margin = info_element->data[1];
1177 network->flags |= NETWORK_HAS_TPC_REPORT;
1178 break;
1094 1179
1095 default: 1180 default:
1096 IEEE80211_DEBUG_MGMT("unsupported IE %d\n", 1181 IEEE80211_DEBUG_MGMT
1097 info_element->id); 1182 ("Unsupported info element: %s (%d)\n",
1183 get_info_element_string(info_element->id),
1184 info_element->id);
1098 break; 1185 break;
1099 } 1186 }
1100 1187
@@ -1110,7 +1197,9 @@ static int ieee80211_parse_info_param(struct ieee80211_info_element
1110static int ieee80211_handle_assoc_resp(struct ieee80211_device *ieee, struct ieee80211_assoc_response 1197static int ieee80211_handle_assoc_resp(struct ieee80211_device *ieee, struct ieee80211_assoc_response
1111 *frame, struct ieee80211_rx_stats *stats) 1198 *frame, struct ieee80211_rx_stats *stats)
1112{ 1199{
1113 struct ieee80211_network network_resp; 1200 struct ieee80211_network network_resp = {
1201 .ibss_dfs = NULL,
1202 };
1114 struct ieee80211_network *network = &network_resp; 1203 struct ieee80211_network *network = &network_resp;
1115 struct net_device *dev = ieee->dev; 1204 struct net_device *dev = ieee->dev;
1116 1205
@@ -1253,7 +1342,22 @@ static void update_network(struct ieee80211_network *dst,
1253 int qos_active; 1342 int qos_active;
1254 u8 old_param; 1343 u8 old_param;
1255 1344
1256 memcpy(&dst->stats, &src->stats, sizeof(struct ieee80211_rx_stats)); 1345 ieee80211_network_reset(dst);
1346 dst->ibss_dfs = src->ibss_dfs;
1347
1348 /* We only update the statistics if they were created by receiving
1349 * the network information on the actual channel the network is on.
1350 *
1351 * This keeps beacons received on neighbor channels from bringing
1352 * down the signal level of an AP. */
1353 if (dst->channel == src->stats.received_channel)
1354 memcpy(&dst->stats, &src->stats,
1355 sizeof(struct ieee80211_rx_stats));
1356 else
1357 IEEE80211_DEBUG_SCAN("Network " MAC_FMT " info received "
1358 "off channel (%d vs. %d)\n", MAC_ARG(src->bssid),
1359 dst->channel, src->stats.received_channel);
1360
1257 dst->capability = src->capability; 1361 dst->capability = src->capability;
1258 memcpy(dst->rates, src->rates, src->rates_len); 1362 memcpy(dst->rates, src->rates, src->rates_len);
1259 dst->rates_len = src->rates_len; 1363 dst->rates_len = src->rates_len;
@@ -1269,6 +1373,7 @@ static void update_network(struct ieee80211_network *dst,
1269 dst->listen_interval = src->listen_interval; 1373 dst->listen_interval = src->listen_interval;
1270 dst->atim_window = src->atim_window; 1374 dst->atim_window = src->atim_window;
1271 dst->erp_value = src->erp_value; 1375 dst->erp_value = src->erp_value;
1376 dst->tim = src->tim;
1272 1377
1273 memcpy(dst->wpa_ie, src->wpa_ie, src->wpa_ie_len); 1378 memcpy(dst->wpa_ie, src->wpa_ie, src->wpa_ie_len);
1274 dst->wpa_ie_len = src->wpa_ie_len; 1379 dst->wpa_ie_len = src->wpa_ie_len;
@@ -1313,7 +1418,9 @@ static void ieee80211_process_probe_response(struct ieee80211_device
1313 *stats) 1418 *stats)
1314{ 1419{
1315 struct net_device *dev = ieee->dev; 1420 struct net_device *dev = ieee->dev;
1316 struct ieee80211_network network; 1421 struct ieee80211_network network = {
1422 .ibss_dfs = NULL,
1423 };
1317 struct ieee80211_network *target; 1424 struct ieee80211_network *target;
1318 struct ieee80211_network *oldest = NULL; 1425 struct ieee80211_network *oldest = NULL;
1319#ifdef CONFIG_IEEE80211_DEBUG 1426#ifdef CONFIG_IEEE80211_DEBUG
@@ -1386,6 +1493,7 @@ static void ieee80211_process_probe_response(struct ieee80211_device
1386 escape_essid(target->ssid, 1493 escape_essid(target->ssid,
1387 target->ssid_len), 1494 target->ssid_len),
1388 MAC_ARG(target->bssid)); 1495 MAC_ARG(target->bssid));
1496 ieee80211_network_reset(target);
1389 } else { 1497 } else {
1390 /* Otherwise just pull from the free list */ 1498 /* Otherwise just pull from the free list */
1391 target = list_entry(ieee->network_free_list.next, 1499 target = list_entry(ieee->network_free_list.next,
@@ -1402,6 +1510,7 @@ static void ieee80211_process_probe_response(struct ieee80211_device
1402 "BEACON" : "PROBE RESPONSE"); 1510 "BEACON" : "PROBE RESPONSE");
1403#endif 1511#endif
1404 memcpy(target, &network, sizeof(*target)); 1512 memcpy(target, &network, sizeof(*target));
1513 network.ibss_dfs = NULL;
1405 list_add_tail(&target->list, &ieee->network_list); 1514 list_add_tail(&target->list, &ieee->network_list);
1406 } else { 1515 } else {
1407 IEEE80211_DEBUG_SCAN("Updating '%s' (" MAC_FMT ") via %s.\n", 1516 IEEE80211_DEBUG_SCAN("Updating '%s' (" MAC_FMT ") via %s.\n",
@@ -1411,6 +1520,7 @@ static void ieee80211_process_probe_response(struct ieee80211_device
1411 is_beacon(beacon->header.frame_ctl) ? 1520 is_beacon(beacon->header.frame_ctl) ?
1412 "BEACON" : "PROBE RESPONSE"); 1521 "BEACON" : "PROBE RESPONSE");
1413 update_network(target, &network); 1522 update_network(target, &network);
1523 network.ibss_dfs = NULL;
1414 } 1524 }
1415 1525
1416 spin_unlock_irqrestore(&ieee->lock, flags); 1526 spin_unlock_irqrestore(&ieee->lock, flags);
@@ -1495,10 +1605,43 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee,
 					     header);
 		break;
 
+	case IEEE80211_STYPE_ACTION:
+		IEEE80211_DEBUG_MGMT("ACTION\n");
+		if (ieee->handle_action)
+			ieee->handle_action(ieee->dev,
+					    (struct ieee80211_action *)
+					    header, stats);
+		break;
+
+	case IEEE80211_STYPE_REASSOC_REQ:
+		IEEE80211_DEBUG_MGMT("received reassoc (%d)\n",
+				     WLAN_FC_GET_STYPE(le16_to_cpu
+						       (header->frame_ctl)));
+
+		IEEE80211_WARNING("%s: IEEE80211_REASSOC_REQ received\n",
+				  ieee->dev->name);
+		if (ieee->handle_reassoc_request != NULL)
+			ieee->handle_reassoc_request(ieee->dev,
+						     (struct ieee80211_reassoc_request *)
+						     header);
+		break;
+
+	case IEEE80211_STYPE_ASSOC_REQ:
+		IEEE80211_DEBUG_MGMT("received assoc (%d)\n",
+				     WLAN_FC_GET_STYPE(le16_to_cpu
+						       (header->frame_ctl)));
+
+		IEEE80211_WARNING("%s: IEEE80211_ASSOC_REQ received\n",
+				  ieee->dev->name);
+		if (ieee->handle_assoc_request != NULL)
+			ieee->handle_assoc_request(ieee->dev);
+		break;
+
 	case IEEE80211_STYPE_DEAUTH:
-		printk("DEAUTH from AP\n");
+		IEEE80211_DEBUG_MGMT("DEAUTH\n");
 		if (ieee->handle_deauth != NULL)
-			ieee->handle_deauth(ieee->dev, (struct ieee80211_auth *)
+			ieee->handle_deauth(ieee->dev,
+					    (struct ieee80211_deauth *)
 					    header);
 		break;
 	default:
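
Note: the hunk above extends ieee80211_rx_mgt() so that ACTION, REASSOC_REQ, ASSOC_REQ and (now correctly typed) DEAUTH management frames are dispatched to per-driver callbacks on struct ieee80211_device. A minimal sketch of how a driver could opt in follows; it is not part of this patch. The handler names and the setup function are hypothetical, and the callback signatures simply mirror the call sites shown above (the stats argument is assumed to be the layer's usual struct ieee80211_rx_stats).

#include <net/ieee80211.h>

static void my_handle_action(struct net_device *dev,
			     struct ieee80211_action *action,
			     struct ieee80211_rx_stats *stats)
{
	/* e.g. react to spectrum-management (DFS) action frames */
}

static void my_handle_assoc_request(struct net_device *dev)
{
	/* AP-style drivers would start association handling here */
}

static void my_setup_mgmt_callbacks(struct ieee80211_device *ieee)
{
	ieee->handle_action = my_handle_action;
	ieee->handle_assoc_request = my_handle_assoc_request;
	/* handle_reassoc_request and handle_deauth are hooked the same way */
}
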
diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c
index 8fdd943ebe8e..8b4332f53394 100644
--- a/net/ieee80211/ieee80211_tx.c
+++ b/net/ieee80211/ieee80211_tx.c
@@ -56,7 +56,18 @@ Desc. | ctrl | dura | DA/RA | TA | SA | Sequ | Frame | fcs |
       `--------------------------------------------------|         |------'
 Total: 28 non-data bytes                                  `----.----'
                                                                |
-       .- 'Frame data' expands to <---------------------------'
+       .- 'Frame data' expands, if WEP enabled, to <----------'
+       |
+       V
+      ,-----------------------.
+Bytes |  4  |   0-2296  |  4  |
+      |-----|-----------|-----|
+Desc. | IV  | Encrypted | ICV |
+      |     | Packet    |     |
+      `-----|           |-----'
+            `-----.-----'
+                  |
+       .- 'Encrypted Packet' expands to
        |
        V
       ,---------------------------------------------------.
@@ -65,18 +76,7 @@ Bytes | 1 | 1 | 1 | 3 | 2 | 0-2304 |
 Desc. | SNAP | SNAP | Control |Eth Tunnel| Type | IP     |
       | DSAP | SSAP |         |          |      | Packet |
       | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8|      |        |
-      `-----------------------------------------|        |
-Total: 8 non-data bytes                 `----.----'
-                                             |
-       .- 'IP Packet' expands, if WEP enabled, to <--'
-       |
-       V
-      ,-----------------------.
-Bytes |  4  |   0-2296  |  4  |
-      |-----|-----------|-----|
-Desc. | IV  | Encrypted | ICV |
-      |     | IP Packet |     |
-      `-----------------------'
+      `----------------------------------------------------
 Total: 8 non-data bytes
 
 802.3 Ethernet Data Frame
@@ -470,7 +470,9 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
 			atomic_inc(&crypt->refcnt);
 			if (crypt->ops->build_iv)
 				crypt->ops->build_iv(skb_frag, hdr_len,
-						     crypt->priv);
+						     ieee->sec.keys[ieee->sec.active_key],
+						     ieee->sec.key_sizes[ieee->sec.active_key],
+						     crypt->priv);
 			atomic_dec(&crypt->refcnt);
 		}
 
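
Note: the call above now hands the active key and its length to the crypto ops' build_iv hook, so IV construction can depend on the actual key material. A sketch under assumptions follows (not from this patch): the build_iv prototype in struct ieee80211_crypto_ops is taken to gain the key/keylen parameters in the order visible at this call site, and the return convention is assumed; the stub name and body are purely illustrative.

#include <linux/skbuff.h>
#include <linux/types.h>

static int my_build_iv(struct sk_buff *skb, int hdr_len,
		       u8 *key, int keylen, void *priv)
{
	/* Reserve and fill the 4-byte IV that follows the 802.11 header,
	 * consulting key/keylen when the IV depends on the key (e.g. TKIP
	 * phase-1/phase-2 mixing).  Placeholder body only. */
	return 0;
}
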
diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c
index f87c6b89f845..af7f9bbfd18a 100644
--- a/net/ieee80211/ieee80211_wx.c
+++ b/net/ieee80211/ieee80211_wx.c
@@ -149,9 +149,7 @@ static char *ipw2100_translate_scan(struct ieee80211_device *ieee,
 		iwe.u.qual.updated |= IW_QUAL_QUAL_INVALID |
 		    IW_QUAL_LEVEL_INVALID;
 		iwe.u.qual.qual = 0;
-		iwe.u.qual.level = 0;
 	} else {
-		iwe.u.qual.level = network->stats.rssi;
 		if (ieee->perfect_rssi == ieee->worst_rssi)
 			iwe.u.qual.qual = 100;
 		else
@@ -179,6 +177,13 @@ static char *ipw2100_translate_scan(struct ieee80211_device *ieee,
 		iwe.u.qual.noise = network->stats.noise;
 	}
 
+	if (!(network->stats.mask & IEEE80211_STATMASK_SIGNAL)) {
+		iwe.u.qual.updated |= IW_QUAL_LEVEL_INVALID;
+		iwe.u.qual.level = 0;
+	} else {
+		iwe.u.qual.level = network->stats.signal;
+	}
+
 	start = iwe_stream_add_event(start, stop, &iwe, IW_EV_QUAL_LEN);
 
 	iwe.cmd = IWEVCUSTOM;
@@ -188,33 +193,21 @@ static char *ipw2100_translate_scan(struct ieee80211_device *ieee,
 	if (iwe.u.data.length)
 		start = iwe_stream_add_point(start, stop, &iwe, custom);
 
+	memset(&iwe, 0, sizeof(iwe));
 	if (network->wpa_ie_len) {
-		char buf[MAX_WPA_IE_LEN * 2 + 30];
-
-		u8 *p = buf;
-		p += sprintf(p, "wpa_ie=");
-		for (i = 0; i < network->wpa_ie_len; i++) {
-			p += sprintf(p, "%02x", network->wpa_ie[i]);
-		}
-
-		memset(&iwe, 0, sizeof(iwe));
-		iwe.cmd = IWEVCUSTOM;
-		iwe.u.data.length = strlen(buf);
+		char buf[MAX_WPA_IE_LEN];
+		memcpy(buf, network->wpa_ie, network->wpa_ie_len);
+		iwe.cmd = IWEVGENIE;
+		iwe.u.data.length = network->wpa_ie_len;
 		start = iwe_stream_add_point(start, stop, &iwe, buf);
 	}
 
+	memset(&iwe, 0, sizeof(iwe));
 	if (network->rsn_ie_len) {
-		char buf[MAX_WPA_IE_LEN * 2 + 30];
-
-		u8 *p = buf;
-		p += sprintf(p, "rsn_ie=");
-		for (i = 0; i < network->rsn_ie_len; i++) {
-			p += sprintf(p, "%02x", network->rsn_ie[i]);
-		}
-
-		memset(&iwe, 0, sizeof(iwe));
-		iwe.cmd = IWEVCUSTOM;
-		iwe.u.data.length = strlen(buf);
+		char buf[MAX_WPA_IE_LEN];
+		memcpy(buf, network->rsn_ie, network->rsn_ie_len);
+		iwe.cmd = IWEVGENIE;
+		iwe.u.data.length = network->rsn_ie_len;
 		start = iwe_stream_add_point(start, stop, &iwe, buf);
 	}
 
@@ -229,6 +222,28 @@ static char *ipw2100_translate_scan(struct ieee80211_device *ieee,
 	if (iwe.u.data.length)
 		start = iwe_stream_add_point(start, stop, &iwe, custom);
 
+	/* Add spectrum management information */
+	iwe.cmd = -1;
+	p = custom;
+	p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Channel flags: ");
+
+	if (ieee80211_get_channel_flags(ieee, network->channel) &
+	    IEEE80211_CH_INVALID) {
+		iwe.cmd = IWEVCUSTOM;
+		p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), "INVALID ");
+	}
+
+	if (ieee80211_get_channel_flags(ieee, network->channel) &
+	    IEEE80211_CH_RADAR_DETECT) {
+		iwe.cmd = IWEVCUSTOM;
+		p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), "DFS ");
+	}
+
+	if (iwe.cmd == IWEVCUSTOM) {
+		iwe.u.data.length = p - custom;
+		start = iwe_stream_add_point(start, stop, &iwe, custom);
+	}
+
 	return start;
 }
 
@@ -734,9 +749,98 @@ int ieee80211_wx_get_encodeext(struct ieee80211_device *ieee,
 	return 0;
 }
 
+int ieee80211_wx_set_auth(struct net_device *dev,
+			  struct iw_request_info *info,
+			  union iwreq_data *wrqu,
+			  char *extra)
+{
+	struct ieee80211_device *ieee = netdev_priv(dev);
+	unsigned long flags;
+	int err = 0;
+
+	spin_lock_irqsave(&ieee->lock, flags);
+
+	switch (wrqu->param.flags & IW_AUTH_INDEX) {
+	case IW_AUTH_WPA_VERSION:
+	case IW_AUTH_CIPHER_PAIRWISE:
+	case IW_AUTH_CIPHER_GROUP:
+	case IW_AUTH_KEY_MGMT:
+		/*
+		 * Host AP driver does not use these parameters and allows
+		 * wpa_supplicant to control them internally.
+		 */
+		break;
+	case IW_AUTH_TKIP_COUNTERMEASURES:
+		break;		/* FIXME */
+	case IW_AUTH_DROP_UNENCRYPTED:
+		ieee->drop_unencrypted = !!wrqu->param.value;
+		break;
+	case IW_AUTH_80211_AUTH_ALG:
+		break;		/* FIXME */
+	case IW_AUTH_WPA_ENABLED:
+		ieee->privacy_invoked = ieee->wpa_enabled = !!wrqu->param.value;
+		break;
+	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+		ieee->ieee802_1x = !!wrqu->param.value;
+		break;
+	case IW_AUTH_PRIVACY_INVOKED:
+		ieee->privacy_invoked = !!wrqu->param.value;
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+	spin_unlock_irqrestore(&ieee->lock, flags);
+	return err;
+}
+
+int ieee80211_wx_get_auth(struct net_device *dev,
+			  struct iw_request_info *info,
+			  union iwreq_data *wrqu,
+			  char *extra)
+{
+	struct ieee80211_device *ieee = netdev_priv(dev);
+	unsigned long flags;
+	int err = 0;
+
+	spin_lock_irqsave(&ieee->lock, flags);
+
+	switch (wrqu->param.flags & IW_AUTH_INDEX) {
+	case IW_AUTH_WPA_VERSION:
+	case IW_AUTH_CIPHER_PAIRWISE:
+	case IW_AUTH_CIPHER_GROUP:
+	case IW_AUTH_KEY_MGMT:
+	case IW_AUTH_TKIP_COUNTERMEASURES:	/* FIXME */
+	case IW_AUTH_80211_AUTH_ALG:		/* FIXME */
+		/*
+		 * Host AP driver does not use these parameters and allows
+		 * wpa_supplicant to control them internally.
+		 */
+		err = -EOPNOTSUPP;
+		break;
+	case IW_AUTH_DROP_UNENCRYPTED:
+		wrqu->param.value = ieee->drop_unencrypted;
+		break;
+	case IW_AUTH_WPA_ENABLED:
+		wrqu->param.value = ieee->wpa_enabled;
+		break;
+	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+		wrqu->param.value = ieee->ieee802_1x;
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+	spin_unlock_irqrestore(&ieee->lock, flags);
+	return err;
+}
+
 EXPORT_SYMBOL(ieee80211_wx_set_encodeext);
 EXPORT_SYMBOL(ieee80211_wx_get_encodeext);
 
 EXPORT_SYMBOL(ieee80211_wx_get_scan);
 EXPORT_SYMBOL(ieee80211_wx_set_encode);
 EXPORT_SYMBOL(ieee80211_wx_get_encode);
+
+EXPORT_SYMBOL_GPL(ieee80211_wx_set_auth);
+EXPORT_SYMBOL_GPL(ieee80211_wx_get_auth);
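
Note: the new ieee80211_wx_set_auth()/ieee80211_wx_get_auth() helpers above implement the SIOCSIWAUTH/SIOCGIWAUTH wireless-extensions requests against the generic ieee80211_device state. A hypothetical piece of driver glue follows (not part of this patch): because the helpers already have the iw_handler prototype and operate on netdev_priv(dev), a driver whose net_device private data is the struct ieee80211_device could export them directly from its handler table. The table and symbol names below are illustrative only.

#include <net/iw_handler.h>
#include <net/ieee80211.h>

/* Sparse table indexed the usual way, SIOCxIWxxx - SIOCIWFIRST. */
static iw_handler my_wx_handlers[] = {
	[SIOCSIWAUTH - SIOCIWFIRST] = ieee80211_wx_set_auth,
	[SIOCGIWAUTH - SIOCIWFIRST] = ieee80211_wx_get_auth,
};

static struct iw_handler_def my_wx_handler_def = {
	.standard	= my_wx_handlers,
	.num_standard	= ARRAY_SIZE(my_wx_handlers),
};
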
diff --git a/net/socket.c b/net/socket.c
index a00851f981db..7e1bdef8b09e 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -84,10 +84,7 @@
 #include <linux/compat.h>
 #include <linux/kmod.h>
 #include <linux/audit.h>
-
-#ifdef CONFIG_NET_RADIO
-#include <linux/wireless.h>	/* Note : will define WIRELESS_EXT */
-#endif	/* CONFIG_NET_RADIO */
+#include <linux/wireless.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -840,11 +837,11 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 	if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) {
 		err = dev_ioctl(cmd, argp);
 	} else
-#ifdef WIRELESS_EXT
+#ifdef CONFIG_WIRELESS_EXT
 	if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
 		err = dev_ioctl(cmd, argp);
 	} else
-#endif	/* WIRELESS_EXT */
+#endif	/* CONFIG_WIRELESS_EXT */
 	switch (cmd) {
 	case FIOSETOWN:
 	case SIOCSPGRP: